convert_hf_to_gguf.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

from __future__ import annotations

import ast
import logging
import argparse
import contextlib
import json
import os
import re
import sys
from enum import IntEnum
from pathlib import Path
from hashlib import sha256
from typing import TYPE_CHECKING, Any, Callable, ContextManager, Iterable, Iterator, Literal, Sequence, TypeVar, cast
from itertools import chain
from transformers import AutoConfig

import math
import numpy as np
import torch

if TYPE_CHECKING:
    from torch import Tensor
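
# prefer the gguf-py package bundled with this repo over any installed copy,
# unless the user opts out via the NO_LOCAL_GGUF environment variable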
if 'NO_LOCAL_GGUF' not in os.environ:
    sys.path.insert(1, str(Path(__file__).parent / 'gguf-py'))
import gguf
from gguf.vocab import MistralTokenizerType, MistralVocab

try:
    from mistral_common.tokens.tokenizers.base import TokenizerVersion  # pyright: ignore[reportMissingImports]
    from mistral_common.tokens.tokenizers.multimodal import DATASET_MEAN as _MISTRAL_COMMON_DATASET_MEAN, DATASET_STD as _MISTRAL_COMMON_DATASET_STD  # pyright: ignore[reportMissingImports]
    from mistral_common.tokens.tokenizers.tekken import Tekkenizer  # pyright: ignore[reportMissingImports]
    from mistral_common.tokens.tokenizers.sentencepiece import (  # pyright: ignore[reportMissingImports]
        SentencePieceTokenizer,
    )

    _mistral_common_installed = True
    _mistral_import_error_msg = ""
except ImportError:
    _MISTRAL_COMMON_DATASET_MEAN = (0.48145466, 0.4578275, 0.40821073)
    _MISTRAL_COMMON_DATASET_STD = (0.26862954, 0.26130258, 0.27577711)

    _mistral_common_installed = False
    TokenizerVersion = None
    Tekkenizer = None
    SentencePieceTokenizer = None
    _mistral_import_error_msg = (
        "Mistral format requires `mistral-common` to be installed. Please run "
        "`pip install mistral-common[image,audio]` to install it."
    )


logger = logging.getLogger("hf-to-gguf")


###### MODEL DEFINITIONS ######

class SentencePieceTokenTypes(IntEnum):
    NORMAL = 1
    UNKNOWN = 2
    CONTROL = 3
    USER_DEFINED = 4
    UNUSED = 5
    BYTE = 6


class ModelType(IntEnum):
    TEXT = 1
    MMPROJ = 2


AnyModel = TypeVar("AnyModel", bound="type[ModelBase]")


class ModelBase:
    _model_classes: dict[ModelType, dict[str, type[ModelBase]]] = {
        ModelType.TEXT: {},
        ModelType.MMPROJ: {},
    }

    dir_model: Path
    ftype: gguf.LlamaFileType
    fname_out: Path
    is_big_endian: bool
    endianess: gguf.GGUFEndian
    use_temp_file: bool
    lazy: bool
    dry_run: bool
    hparams: dict[str, Any]
    model_tensors: dict[str, Callable[[], Tensor]]
    gguf_writer: gguf.GGUFWriter
    model_name: str | None
    metadata_override: Path | None
    dir_model_card: Path
    remote_hf_model_id: str | None

    # subclasses should define this!
    model_arch: gguf.MODEL_ARCH

    # subclasses should initialize this!
    block_count: int
    tensor_map: gguf.TensorNameMap

    # Mistral format specifics
    is_mistral_format: bool = False
    disable_mistral_community_chat_template: bool = False
    sentence_transformers_dense_modules: bool = False

    def __init__(self, dir_model: Path, ftype: gguf.LlamaFileType, fname_out: Path, *, is_big_endian: bool = False,
                 use_temp_file: bool = False, eager: bool = False,
                 metadata_override: Path | None = None, model_name: str | None = None,
                 split_max_tensors: int = 0, split_max_size: int = 0, dry_run: bool = False,
                 small_first_shard: bool = False, hparams: dict[str, Any] | None = None, remote_hf_model_id: str | None = None,
                 disable_mistral_community_chat_template: bool = False,
                 sentence_transformers_dense_modules: bool = False):
        if type(self) is ModelBase or \
                type(self) is TextModel or \
                type(self) is MmprojModel:
            raise TypeError(f"{type(self).__name__!r} should not be directly instantiated")

        if self.is_mistral_format and not _mistral_common_installed:
            raise ImportError(_mistral_import_error_msg)

        self.dir_model = dir_model
        self.ftype = ftype
        self.fname_out = fname_out
        self.is_big_endian = is_big_endian
        self.endianess = gguf.GGUFEndian.BIG if is_big_endian else gguf.GGUFEndian.LITTLE
        self.use_temp_file = use_temp_file
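        # remote models are always indexed lazily, since their tensors are fetched on demand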
        self.lazy = not eager or (remote_hf_model_id is not None)
        self.dry_run = dry_run
        self.remote_hf_model_id = remote_hf_model_id
        self.sentence_transformers_dense_modules = sentence_transformers_dense_modules
        self.hparams = ModelBase.load_hparams(self.dir_model, self.is_mistral_format) if hparams is None else hparams
        self.model_tensors = self.index_tensors(remote_hf_model_id=remote_hf_model_id)
        self.metadata_override = metadata_override
        self.model_name = model_name
        self.dir_model_card = dir_model  # overridden in convert_lora_to_gguf.py

        # Apply heuristics to figure out typical tensor encoding based on first layer tensor encoding type
        if self.ftype == gguf.LlamaFileType.GUESSED:
            # NOTE: can't use field "torch_dtype" in config.json, because some finetunes lie.
            _, first_tensor = next(self.get_tensors())
            if first_tensor.dtype == torch.float16:
                logger.info(f"choosing --outtype f16 from first tensor type ({first_tensor.dtype})")
                self.ftype = gguf.LlamaFileType.MOSTLY_F16
            else:
                logger.info(f"choosing --outtype bf16 from first tensor type ({first_tensor.dtype})")
                self.ftype = gguf.LlamaFileType.MOSTLY_BF16

        self.dequant_model()

        # Configure GGUF Writer
        self.gguf_writer = gguf.GGUFWriter(path=None, arch=gguf.MODEL_ARCH_NAMES[self.model_arch], endianess=self.endianess, use_temp_file=self.use_temp_file,
                                           split_max_tensors=split_max_tensors, split_max_size=split_max_size, dry_run=dry_run, small_first_shard=small_first_shard)

        # Mistral specific
        self.disable_mistral_community_chat_template = disable_mistral_community_chat_template

    @classmethod
    def add_prefix_to_filename(cls, path: Path, prefix: str) -> Path:
        stem, suffix = path.stem, path.suffix
        new_name = f"{prefix}{stem}{suffix}"
        return path.with_name(new_name)

    def find_hparam(self, keys: Iterable[str], optional: bool = False) -> Any:
        key = next((k for k in keys if k in self.hparams), None)
        if key is not None:
            return self.hparams[key]
        if optional:
            return None
        raise KeyError(f"could not find any of: {keys}")

    def index_tensors(self, remote_hf_model_id: str | None = None) -> dict[str, Callable[[], Tensor]]:
        tensors: dict[str, Callable[[], Tensor]] = {}
        if remote_hf_model_id is not None:
            is_safetensors = True
            logger.info(f"Using remote model with HuggingFace id: {remote_hf_model_id}")
            remote_tensors = gguf.utility.SafetensorRemote.get_list_tensors_hf_model(remote_hf_model_id)
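            # bind the current tensor as a default argument: a plain closure would late-bind
            # the loop variable and every entry would end up loading the last tensor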
            for name, remote_tensor in remote_tensors.items():
                tensors[name] = lambda r=remote_tensor: LazyTorchTensor.from_remote_tensor(r)

            return tensors

        prefix = "model" if not self.is_mistral_format else "consolidated"
        part_names: list[str] = ModelBase.get_model_part_names(self.dir_model, prefix, ".safetensors")
        is_safetensors: bool = len(part_names) > 0
        if not is_safetensors:
            part_names = ModelBase.get_model_part_names(self.dir_model, "pytorch_model", ".bin")

        tensor_names_from_index: set[str] = set()

        if not self.is_mistral_format:
            index_name = "model.safetensors" if is_safetensors else "pytorch_model.bin"
            index_name += ".index.json"
            index_file = self.dir_model / index_name

            if index_file.is_file():
                logger.info(f"gguf: loading model weight map from '{index_name}'")
                with open(index_file, "r", encoding="utf-8") as f:
                    index: dict[str, Any] = json.load(f)
                    weight_map = index.get("weight_map")
                    if weight_map is None or not isinstance(weight_map, dict):
                        raise ValueError(f"Can't load 'weight_map' from {index_name!r}")
                    tensor_names_from_index.update(weight_map.keys())
            else:
                weight_map = {}
        else:
            weight_map = {}

        for part_name in part_names:
            logger.info(f"gguf: indexing model part '{part_name}'")
            ctx: ContextManager[Any]
            if is_safetensors:
                from safetensors import safe_open
                ctx = cast(ContextManager[Any], safe_open(self.dir_model / part_name, framework="pt", device="cpu"))
            else:
                ctx = contextlib.nullcontext(torch.load(str(self.dir_model / part_name), map_location="cpu", mmap=True, weights_only=True))

            with ctx as model_part:
                assert model_part is not None

                for name in model_part.keys():
                    if is_safetensors:
                        if self.lazy:
                            data = model_part.get_slice(name)
                            data_gen = lambda data=data: LazyTorchTensor.from_safetensors_slice(data)  # noqa: E731
                        else:
                            data = model_part.get_tensor(name)
                            data_gen = lambda data=data: data  # noqa: E731
                    else:
                        data = model_part[name]
                        if self.lazy:
                            data_gen = lambda data=data: LazyTorchTensor.from_eager(data)  # noqa: E731
                        else:
                            data_gen = lambda data=data: data  # noqa: E731
                    tensors[name] = data_gen

        # verify tensor name presence and identify potentially missing files
        if len(tensor_names_from_index) > 0:
            tensor_names_from_parts = set(tensors.keys())
            if len(tensor_names_from_parts.symmetric_difference(tensor_names_from_index)) > 0:
                missing = sorted(tensor_names_from_index.difference(tensor_names_from_parts))
                extra = sorted(tensor_names_from_parts.difference(tensor_names_from_index))
                missing_files = sorted(set(weight_map[n] for n in missing if n in weight_map))
                if len(extra) == 0 and len(missing_files) > 0:
                    raise ValueError(f"Missing or incomplete model files: {missing_files}\n"
                                     f"Missing tensors: {missing}")
                else:
                    raise ValueError("Mismatch between weight map and model parts for tensor names:\n"
                                     f"Missing tensors: {missing}\n"
                                     f"Extra tensors: {extra}")

        return tensors
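
    # when a checkpoint ships pre-quantized weights (bitnet / fp8 / gptq), replace the
    # stored tensors with lazy closures that dequantize back to float on first access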
    def dequant_model(self):
        tensors_to_remove: list[str] = []
        new_tensors: dict[str, Callable[[], Tensor]] = {}

        if (quant_config := self.hparams.get("quantization_config")) and isinstance(quant_config, dict):
            quant_method = quant_config.get("quant_method")
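
            # each uint8 packs four 2-bit fields (shifts 0/2/4/6); masking with 3 and
            # subtracting 1 recovers the ternary weights, stacked along the first dim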
            def dequant_bitnet(weight: Tensor, scale: Tensor) -> Tensor:
                weight = weight.view(torch.uint8)
                orig_shape = weight.shape

                shift = torch.tensor([0, 2, 4, 6], dtype=torch.uint8).reshape((4, *(1 for _ in range(len(orig_shape)))))
                data = weight.unsqueeze(0).expand((4, *orig_shape)) >> shift
                data = data & 3
                data = (data.float() - 1).reshape((orig_shape[0] * 4, *orig_shape[1:]))

                # The scale is inverted
                return data / scale.float()
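
            # a scale may be stored per block (e.g. one scale per 128x128 tile for fp8);
            # repeat_interleave broadcasts it to one scale per element, then slicing trims
            # any padding when the weight isn't a whole multiple of the block size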
            def dequant_simple(weight: Tensor, scale: Tensor) -> Tensor:
                scale = scale.float()

                if (weight_block_size := quant_config.get("weight_block_size")):
                    # TODO: make sure it's a list of integers
                    for i, size in enumerate(weight_block_size):
                        scale = scale.repeat_interleave(size, i)
                    # unpad the scale (e.g. when the tensor size isn't a multiple of the block size)
                    scale = scale[tuple(slice(0, size) for size in weight.shape)]

                return weight.float() * scale
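
            # rough layout: qweight packs `pack_factor` quantized values per storage int
            # along the input dim, qzeros packs the per-group zero-points the same way
            # along the output dim, and g_idx maps each input row to its quantization
            # group, so scales[g_idx] and zeros[g_idx] pick the right group parameters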
            # ref: https://github.com/ModelCloud/GPTQModel/blob/037c5c0f6c9e33c500d975b038d02e7ca437546d/gptqmodel/nn_modules/qlinear/__init__.py#L437-L476
            def dequant_gptq(g_idx: Tensor, qweight: Tensor, qzeros: Tensor, scales: Tensor) -> Tensor:
                bits = quant_config["bits"]
                assert bits in (2, 3, 4, 8)
                assert qweight.dtype == qzeros.dtype
                maxq = (2 ** bits) - 1
                weight = None
                zeros = None
                pack_dtype_bits = qweight.dtype.itemsize * 8

                if bits in [2, 4, 8]:
                    pack_factor = pack_dtype_bits // bits
                    wf = torch.tensor(list(range(0, pack_dtype_bits, bits)), dtype=torch.int32).unsqueeze(0)
                    if self.lazy:
                        wf = LazyTorchTensor.from_eager(wf)

                    zeros = torch.bitwise_right_shift(
                        qzeros.unsqueeze(2).expand(-1, -1, pack_factor),
                        wf.unsqueeze(0)
                    ).to(torch.int16 if bits == 8 else torch.int8)
                    zeros = torch.bitwise_and(zeros, maxq).reshape(scales.shape)

                    weight = torch.bitwise_and(
                        torch.bitwise_right_shift(
                            qweight.unsqueeze(1).expand(-1, pack_factor, -1),
                            wf.unsqueeze(-1)
                        ).to(torch.int16 if bits == 8 else torch.int8),
                        maxq
                    )
                elif bits == 3:
                    raise NotImplementedError("3-bit gptq dequantization is not yet implemented")

                assert weight is not None
                assert zeros is not None

                weight = weight.reshape(weight.shape[0] * weight.shape[1], weight.shape[2])

                # gptq_v2 doesn't need to offset zeros
                if quant_config.get("checkpoint_format", "gptq") == "gptq":
                    zeros += 1

                return (scales[g_idx].float() * (weight - zeros[g_idx]).float()).T
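
            # rewire the tensor index: each branch swaps the stored weight loader for a
            # closure that dequantizes lazily, and queues the auxiliary quant tensors for removal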
            if quant_method == "bitnet":
                for name in self.model_tensors.keys():
                    if name.endswith(".weight_scale"):
                        weight_name = name.removesuffix("_scale")
                        w = self.model_tensors[weight_name]
                        s = self.model_tensors[name]
                        self.model_tensors[weight_name] = lambda w=w, s=s: dequant_bitnet(w(), s())
                        tensors_to_remove.append(name)
            elif quant_method == "fp8":
                for name in self.model_tensors.keys():
                    if name.endswith(".weight_scale_inv"):
                        weight_name = name.removesuffix("_scale_inv")
                        w = self.model_tensors[weight_name]
                        s = self.model_tensors[name]
                        self.model_tensors[weight_name] = lambda w=w, s=s: dequant_simple(w(), s())
                        tensors_to_remove.append(name)
            elif quant_method == "gptq":
                for name in self.model_tensors.keys():
                    if name.endswith(".qweight"):
                        base_name = name.removesuffix(".qweight")
                        g_idx = self.model_tensors[base_name + ".g_idx"]
                        qweight = self.model_tensors[base_name + ".qweight"]
                        qzeros = self.model_tensors[base_name + ".qzeros"]
                        scales = self.model_tensors[base_name + ".scales"]
                        new_tensors[base_name + ".weight"] = (
                            lambda g=g_idx, z=qzeros, w=qweight, s=scales: dequant_gptq(
                                g(), w(), z(), s()
                            )
                        )
                        tensors_to_remove += [
                            base_name + n
                            for n in (
                                ".g_idx",
                                ".qzeros",
                                ".qweight",
                                ".scales",
                            )
                        ]
            else:
                raise NotImplementedError(f"Quant method is not yet supported: {quant_method!r}")

        for name in tensors_to_remove:
            if name in self.model_tensors:
                del self.model_tensors[name]

        for name, value in new_tensors.items():
            self.model_tensors[name] = value

    def get_tensors(self) -> Iterator[tuple[str, Tensor]]:
        for name, gen in self.model_tensors.items():
            yield name, gen()

    def format_tensor_name(self, key: gguf.MODEL_TENSOR, bid: int | None = None, suffix: str = ".weight") -> str:
        if key not in gguf.MODEL_TENSORS[self.model_arch]:
            raise ValueError(f"Missing {key!r} for MODEL_TENSORS of {self.model_arch!r}")
        name: str = gguf.TENSOR_NAMES[key]
        if "{bid}" in name:
            assert bid is not None
            name = name.format(bid=bid)
        return name + suffix

    def match_model_tensor_name(self, name: str, key: gguf.MODEL_TENSOR, bid: int | None, suffix: str = ".weight") -> bool:
        if key not in gguf.MODEL_TENSORS[self.model_arch]:
            return False
        key_name: str = gguf.TENSOR_NAMES[key]
        if "{bid}" in key_name:
            if bid is None:
                return False
            key_name = key_name.format(bid=bid)
        else:
            if bid is not None:
                return False
        return name == (key_name + suffix)

    def map_tensor_name(self, name: str, try_suffixes: Sequence[str] = (".weight", ".bias")) -> str:
        new_name = self.tensor_map.get_name(key=name, try_suffixes=try_suffixes)
        if new_name is None:
            raise ValueError(f"Can not map tensor {name!r}")
        return new_name

    def set_gguf_parameters(self):
        raise NotImplementedError("set_gguf_parameters() must be implemented in subclasses")

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused
        return [(self.map_tensor_name(name), data_torch)]

    def tensor_force_quant(self, name: str, new_name: str, bid: int | None, n_dims: int) -> gguf.GGMLQuantizationType | bool:
        del name, new_name, bid, n_dims  # unused
        return False

    # some models need extra generated tensors (like rope_freqs)
    def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
        return ()
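
    # main conversion loop: rename each tensor, pick its output type (forced F32/F16 for
    # special cases, otherwise the requested --outtype), quantize, and hand it to the writer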
    def prepare_tensors(self):
        max_name_len = max(len(s) for _, s in self.tensor_map.mapping.values()) + len(".weight,")

        for name, data_torch in chain(self.generate_extra_tensors(), self.get_tensors()):
            # we don't need these
            if name.endswith((".attention.masked_bias", ".attention.bias", ".rotary_emb.inv_freq")):
                continue

            old_dtype = data_torch.dtype

            # convert any unsupported data types to float32
            if data_torch.dtype not in (torch.float16, torch.float32):
                data_torch = data_torch.to(torch.float32)

            # use the first number-like part of the tensor name as the block id
            bid = None
            for part in name.split("."):
                if part.isdecimal():
                    bid = int(part)
                    break

            for new_name, data_torch in (self.modify_tensors(data_torch, name, bid)):
                # TODO: why do we squeeze here?
                # data = data_torch.squeeze().numpy()
                data = data_torch.numpy()

                n_dims = len(data.shape)
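                # tensor_force_quant returns False (no opinion), True (use the target ftype),
                # or an explicit GGMLQuantizationType that overrides the checks below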
  379. data_qtype: gguf.GGMLQuantizationType | bool = self.tensor_force_quant(name, new_name, bid, n_dims)
  380. # Most of the codebase that takes in 1D tensors or norms only handles F32 tensors
  381. if n_dims <= 1 or new_name.endswith("_norm.weight"):
  382. data_qtype = gguf.GGMLQuantizationType.F32
  383. # Conditions should closely match those in llama_model_quantize_internal in llama.cpp
  384. # Some tensor types are always in float32
  385. if data_qtype is False and (
  386. any(
  387. self.match_model_tensor_name(new_name, key, bid)
  388. for key in (
  389. gguf.MODEL_TENSOR.FFN_GATE_INP,
  390. gguf.MODEL_TENSOR.POS_EMBD,
  391. gguf.MODEL_TENSOR.TOKEN_TYPES,
  392. gguf.MODEL_TENSOR.SSM_CONV1D,
  393. gguf.MODEL_TENSOR.SHORTCONV_CONV,
  394. gguf.MODEL_TENSOR.TIME_MIX_FIRST,
  395. gguf.MODEL_TENSOR.TIME_MIX_W1,
  396. gguf.MODEL_TENSOR.TIME_MIX_W2,
  397. gguf.MODEL_TENSOR.TIME_MIX_DECAY_W1,
  398. gguf.MODEL_TENSOR.TIME_MIX_DECAY_W2,
  399. gguf.MODEL_TENSOR.TIME_MIX_LERP_FUSED,
  400. gguf.MODEL_TENSOR.POSNET_NORM1,
  401. gguf.MODEL_TENSOR.POSNET_NORM2,
  402. gguf.MODEL_TENSOR.V_ENC_EMBD_POS,
  403. gguf.MODEL_TENSOR.A_ENC_EMBD_POS,
  404. gguf.MODEL_TENSOR.ALTUP_CORRECT_COEF,
  405. gguf.MODEL_TENSOR.ALTUP_PREDICT_COEF,
  406. )
  407. )
  408. or not new_name.endswith(".weight")
  409. ):
  410. data_qtype = gguf.GGMLQuantizationType.F32
  411. if data_qtype is False and any(
  412. self.match_model_tensor_name(new_name, key, bid)
  413. for key in (
  414. gguf.MODEL_TENSOR.TOKEN_EMBD,
  415. gguf.MODEL_TENSOR.PER_LAYER_TOKEN_EMBD,
  416. gguf.MODEL_TENSOR.OUTPUT,
  417. gguf.MODEL_TENSOR.ALTUP_ROUTER,
  418. gguf.MODEL_TENSOR.LAUREL_L,
  419. gguf.MODEL_TENSOR.LAUREL_R,
  420. )
  421. ):
  422. if self.ftype in (
  423. gguf.LlamaFileType.MOSTLY_TQ1_0,
  424. gguf.LlamaFileType.MOSTLY_TQ2_0,
  425. ):
  426. # TODO: use Q4_K and Q6_K
  427. data_qtype = gguf.GGMLQuantizationType.F16
  428. # No override (data_qtype is False), or wants to be quantized (data_qtype is True)
  429. if isinstance(data_qtype, bool):
  430. if self.ftype == gguf.LlamaFileType.ALL_F32:
  431. data_qtype = gguf.GGMLQuantizationType.F32
  432. elif self.ftype == gguf.LlamaFileType.MOSTLY_F16:
  433. data_qtype = gguf.GGMLQuantizationType.F16
  434. elif self.ftype == gguf.LlamaFileType.MOSTLY_BF16:
  435. data_qtype = gguf.GGMLQuantizationType.BF16
  436. elif self.ftype == gguf.LlamaFileType.MOSTLY_Q8_0:
  437. data_qtype = gguf.GGMLQuantizationType.Q8_0
  438. elif self.ftype == gguf.LlamaFileType.MOSTLY_TQ1_0:
  439. data_qtype = gguf.GGMLQuantizationType.TQ1_0
  440. elif self.ftype == gguf.LlamaFileType.MOSTLY_TQ2_0:
  441. data_qtype = gguf.GGMLQuantizationType.TQ2_0
  442. else:
  443. raise ValueError(f"Unknown file type: {self.ftype.name}")
  444. try:
  445. data = gguf.quants.quantize(data, data_qtype)
  446. except gguf.QuantError as e:
  447. logger.warning("%s, %s", e, "falling back to F16")
  448. data_qtype = gguf.GGMLQuantizationType.F16
  449. data = gguf.quants.quantize(data, data_qtype)
  450. shape = gguf.quant_shape_from_byte_shape(data.shape, data_qtype) if data.dtype == np.uint8 else data.shape
  451. # reverse shape to make it similar to the internal ggml dimension order
  452. shape_str = f"{{{', '.join(str(n) for n in reversed(shape))}}}"
  453. # n_dims is implicit in the shape
  454. logger.info(f"{f'%-{max_name_len}s' % f'{new_name},'} {old_dtype} --> {data_qtype.name}, shape = {shape_str}")
  455. self.gguf_writer.add_tensor(new_name, data, raw_dtype=data_qtype)
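
    # Note on precedence above: tensor_force_quant() is consulted first, then the
    # 1D/norm F32 rule, then the always-F32 tensor list, and only then the per-file-type
    # default; e.g. under MOSTLY_Q8_0 a 1D norm weight still lands as F32, not Q8_0.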

    def set_type(self):
        self.gguf_writer.add_type(gguf.GGUFType.MODEL)

    def prepare_metadata(self, vocab_only: bool):
        total_params, shared_params, expert_params, expert_count = self.gguf_writer.get_total_parameter_count()

        self.metadata = gguf.Metadata.load(self.metadata_override, self.dir_model_card, self.model_name, total_params)

        # If we are using HF model id, set the metadata name to the model id
        if self.remote_hf_model_id:
            self.metadata.name = self.remote_hf_model_id

        # Fallback to model directory name if metadata name is still missing
        if self.metadata.name is None:
            self.metadata.name = self.dir_model.name

        # Generate parameter weight class (useful for leaderboards) if not yet determined
        if self.metadata.size_label is None and total_params > 0:
            self.metadata.size_label = gguf.size_label(total_params, shared_params, expert_params, expert_count)

        self.set_type()

        logger.info("Set meta model")
        self.metadata.set_gguf_meta_model(self.gguf_writer)

        logger.info("Set model parameters")
        self.set_gguf_parameters()

        logger.info("Set model quantization version")
        self.gguf_writer.add_quantization_version(gguf.GGML_QUANT_VERSION)

    def write_vocab(self):
        raise NotImplementedError("write_vocab() must be implemented in subclasses")

    def write(self):
        self.prepare_tensors()
        self.prepare_metadata(vocab_only=False)
        self.gguf_writer.write_header_to_file(path=self.fname_out)
        self.gguf_writer.write_kv_data_to_file()
        self.gguf_writer.write_tensors_to_file(progress=True)
        self.gguf_writer.close()

    @staticmethod
    def get_model_part_names(dir_model: Path, prefix: str, suffix: str) -> list[str]:
        part_names: list[str] = []
        for filename in os.listdir(dir_model):
            if filename.startswith(prefix) and filename.endswith(suffix):
                part_names.append(filename)

        part_names.sort()

        return part_names
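
    # For example, get_model_part_names(dir_model, "model", ".safetensors") collects
    # sharded checkpoints such as "model-00001-of-00002.safetensors" and
    # "model-00002-of-00002.safetensors", in sorted order.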

    @staticmethod
    def load_hparams(dir_model: Path, is_mistral_format: bool):
        if is_mistral_format:
            with open(dir_model / "params.json", "r", encoding="utf-8") as f:
                config = json.load(f)
            return config

        try:
            # for security reasons, we don't allow loading remote code by default
            # if a model needs remote code, we fall back to config.json
            config = AutoConfig.from_pretrained(dir_model, trust_remote_code=False).to_dict()
        except Exception as e:
            logger.warning(f"Failed to load model config from {dir_model}: {e}")
            logger.warning("Trying to load config.json instead")
            with open(dir_model / "config.json", "r", encoding="utf-8") as f:
                config = json.load(f)
        if "llm_config" in config:
            # rename for InternVL
            config["text_config"] = config["llm_config"]
        if "thinker_config" in config:
            # rename for Qwen2.5-Omni
            config["text_config"] = config["thinker_config"]["text_config"]
        return config
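
    # Note: both branches return a plain dict, so HF-style and Mistral-style configs
    # can be consumed uniformly; the "text_config" aliasing above lets multimodal
    # wrappers expose their language-model hparams under a single known key.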

    @classmethod
    def register(cls, *names: str) -> Callable[[AnyModel], AnyModel]:
        assert names

        def func(modelcls: AnyModel) -> AnyModel:
            model_type = ModelType.MMPROJ if modelcls.model_arch == gguf.MODEL_ARCH.MMPROJ else ModelType.TEXT
            for name in names:
                cls._model_classes[model_type][name] = modelcls
            return modelcls
        return func
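
    # Usage: decorate a subclass with the HF architecture name(s) it handles, e.g.
    #
    #     @ModelBase.register("GPTNeoXForCausalLM")
    #     class GPTNeoXModel(TextModel):
    #         model_arch = gguf.MODEL_ARCH.GPTNEOX
    #
    # (this is the exact pattern used by the model classes later in this file)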

    @classmethod
    def print_registered_models(cls):
        for model_type, model_classes in cls._model_classes.items():
            logger.error(f"{model_type.name} models:")
            for name in sorted(model_classes.keys()):
                logger.error(f" - {name}")

    @classmethod
    def from_model_architecture(cls, arch: str, model_type=ModelType.TEXT) -> type[ModelBase]:
        try:
            return cls._model_classes[model_type][arch]
        except KeyError:
            raise NotImplementedError(f'Architecture {arch!r} not supported!') from None


class TextModel(ModelBase):
    model_type = ModelType.TEXT
    hf_arch: str

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if not self.is_mistral_format:
            self.hf_arch = get_model_architecture(self.hparams, self.model_type)
        else:
            self.hf_arch = ""

        if "text_config" in self.hparams:
            # move the text_config to the root level
            self.hparams = {**self.hparams, **self.hparams["text_config"]}

        self.block_count = self.find_hparam(["n_layers", "num_hidden_layers", "n_layer", "num_layers"])
        self.tensor_map = gguf.get_tensor_name_map(self.model_arch, self.block_count)

    @classmethod
    def __init_subclass__(cls):
        # can't use an abstract property, because overriding it without type errors
        # would require using decorated functions instead of simply defining the property
        if "model_arch" not in cls.__dict__:
            raise TypeError(f"Missing property 'model_arch' for {cls.__name__!r}")

    def set_vocab(self):
        self._set_vocab_gpt2()

    def prepare_metadata(self, vocab_only: bool):
        super().prepare_metadata(vocab_only=vocab_only)

        total_params = self.gguf_writer.get_total_parameter_count()[0]

        # Extract the encoding scheme from the file type name, e.g. 'gguf.LlamaFileType.MOSTLY_Q8_0' --> 'Q8_0'
        output_type: str = self.ftype.name.partition("_")[2]

        # Filename Output
        if self.fname_out.is_dir():
            # Generate default filename based on model specification and available metadata
            if not vocab_only:
                fname_default: str = gguf.naming_convention(self.metadata.name, self.metadata.basename, self.metadata.finetune, self.metadata.version, self.metadata.size_label, output_type, model_type="LoRA" if total_params < 0 else None)
            else:
                fname_default: str = gguf.naming_convention(self.metadata.name, self.metadata.basename, self.metadata.finetune, self.metadata.version, size_label=None, output_type=None, model_type="vocab")

            # Use the default filename
            self.fname_out = self.fname_out / f"{fname_default}.gguf"
        else:
            # Output path is a custom defined templated filename
            # Note: `not is_dir()` is used because `.is_file()` will not detect
            #       a file template string, since the templated file does not exist yet
            # Process the templated file name with the output ftype, useful with the "auto" ftype
            self.fname_out = self.fname_out.parent / gguf.fill_templated_filename(self.fname_out.name, output_type)
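
        # For example (illustrative, assuming gguf.fill_templated_filename() substitutes
        # the lowercased type for "{ftype}"): with output_type "Q8_0", a template like
        # "mymodel-{ftype}.gguf" becomes "mymodel-q8_0.gguf".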

        logger.info("Set model tokenizer")
        self.set_vocab()

    def set_gguf_parameters(self):
        self.gguf_writer.add_block_count(self.block_count)

        if (n_ctx := self.find_hparam(["max_position_embeddings", "n_ctx", "n_positions", "max_length"], optional=True)) is not None:
            self.gguf_writer.add_context_length(n_ctx)
            logger.info(f"gguf: context length = {n_ctx}")

        if (n_embd := self.find_hparam(["hidden_size", "n_embd", "dim"], optional=True)) is not None:
            self.gguf_writer.add_embedding_length(n_embd)
            logger.info(f"gguf: embedding length = {n_embd}")

        if (n_ff := self.find_hparam(["intermediate_size", "n_inner", "hidden_dim"], optional=True)) is not None:
            self.gguf_writer.add_feed_forward_length(n_ff)
            logger.info(f"gguf: feed forward length = {n_ff}")

        if (n_head := self.find_hparam(["num_attention_heads", "n_head", "n_heads"], optional=True)) is not None:
            self.gguf_writer.add_head_count(n_head)
            logger.info(f"gguf: head count = {n_head}")

        if (n_head_kv := self.find_hparam(["num_key_value_heads", "n_kv_heads"], optional=True)) is not None:
            self.gguf_writer.add_head_count_kv(n_head_kv)
            logger.info(f"gguf: key-value head count = {n_head_kv}")

        if (rope_theta := self.hparams.get("rope_theta")) is not None:
            self.gguf_writer.add_rope_freq_base(rope_theta)
            logger.info(f"gguf: rope theta = {rope_theta}")

        if (f_rms_eps := self.find_hparam(["rms_norm_eps", "norm_eps"], optional=True)) is not None:
            self.gguf_writer.add_layer_norm_rms_eps(f_rms_eps)
            logger.info(f"gguf: rms norm epsilon = {f_rms_eps}")

        if (f_norm_eps := self.find_hparam(["layer_norm_eps", "layer_norm_epsilon", "norm_epsilon"], optional=True)) is not None:
            self.gguf_writer.add_layer_norm_eps(f_norm_eps)
            logger.info(f"gguf: layer norm epsilon = {f_norm_eps}")

        if (n_experts := self.hparams.get("num_local_experts")) is not None:
            self.gguf_writer.add_expert_count(n_experts)
            logger.info(f"gguf: expert count = {n_experts}")

        if (n_experts_used := self.hparams.get("num_experts_per_tok")) is not None:
            self.gguf_writer.add_expert_used_count(n_experts_used)
            logger.info(f"gguf: experts used count = {n_experts_used}")

        if (head_dim := self.hparams.get("head_dim")) is not None:
            self.gguf_writer.add_key_length(head_dim)
            self.gguf_writer.add_value_length(head_dim)

        self.gguf_writer.add_file_type(self.ftype)
        logger.info(f"gguf: file type = {self.ftype}")

    def write_vocab(self):
        if len(self.gguf_writer.tensors) != 1:
            raise ValueError('Splitting the vocabulary is not supported')

        self.prepare_metadata(vocab_only=True)
        self.gguf_writer.write_header_to_file(path=self.fname_out)
        self.gguf_writer.write_kv_data_to_file()
        self.gguf_writer.close()

    def does_token_look_special(self, token: str | bytes) -> bool:
        if isinstance(token, (bytes, bytearray)):
            token_text = token.decode(encoding="utf-8")
        elif isinstance(token, memoryview):
            token_text = token.tobytes().decode(encoding="utf-8")
        else:
            token_text = token

        # Some models mark some added tokens which ought to be control tokens as not special.
        # (e.g. command-r, command-r-plus, deepseek-coder, gemma{,-2})
        seems_special = token_text in (
            "<pad>",  # deepseek-coder
            "<mask>", "<2mass>", "[@BOS@]",  # gemma{,-2}
        )

        seems_special = seems_special or (token_text.startswith("<|") and token_text.endswith("|>"))
        seems_special = seems_special or (token_text.startswith("<｜") and token_text.endswith("｜>"))  # deepseek-coder

        # TODO: should these be marked as UNUSED instead? (maybe not)
        seems_special = seems_special or (token_text.startswith("<unused") and token_text.endswith(">"))  # gemma{,-2}

        return seems_special
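
    # Illustrative behaviour: "<|im_start|>", "<pad>" and "<unused12>" all look
    # special under the rules above, while an ordinary word token does not.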

    # used for GPT-2 BPE and WordPiece vocabs
    def get_vocab_base(self) -> tuple[list[str], list[int], str]:
        tokens: list[str] = []
        toktypes: list[int] = []

        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(self.dir_model)
        vocab_size = self.hparams.get("vocab_size", len(tokenizer.vocab))
        assert max(tokenizer.vocab.values()) < vocab_size

        tokpre = self.get_vocab_base_pre(tokenizer)

        reverse_vocab = {id_: encoded_tok for encoded_tok, id_ in tokenizer.vocab.items()}
        added_vocab = tokenizer.get_added_vocab()
        added_tokens_decoder = tokenizer.added_tokens_decoder

        for i in range(vocab_size):
            if i not in reverse_vocab:
                tokens.append(f"[PAD{i}]")
                toktypes.append(gguf.TokenType.UNUSED)
            else:
                token: str = reverse_vocab[i]
                if token in added_vocab:
                    # The tokenizer in llama.cpp assumes the CONTROL and USER_DEFINED tokens are pre-normalized.
                    # To avoid unexpected issues - we make sure to normalize non-normalized tokens
                    if not added_tokens_decoder[i].normalized:
                        previous_token = token
                        token = tokenizer.decode(tokenizer.encode(token, add_special_tokens=False))
                        if previous_token != token:
                            logger.info(f"{repr(previous_token)} is encoded and decoded back to {repr(token)} using AutoTokenizer")

                    if added_tokens_decoder[i].special or self.does_token_look_special(token):
                        toktypes.append(gguf.TokenType.CONTROL)
                    else:
                        # NOTE: this was added for Gemma.
                        # Encoding and decoding the tokens above isn't sufficient for this case.
                        token = token.replace(b"\xe2\x96\x81".decode("utf-8"), " ")  # pre-normalize user-defined spaces
                        toktypes.append(gguf.TokenType.USER_DEFINED)
                else:
                    toktypes.append(gguf.TokenType.NORMAL)
                tokens.append(token)

        return tokens, toktypes, tokpre

    # NOTE: this function is generated by convert_hf_to_gguf_update.py
    #       do not modify it manually!
    # ref:  https://github.com/ggml-org/llama.cpp/pull/6920
    # Marker: Start get_vocab_base_pre
    def get_vocab_base_pre(self, tokenizer) -> str:
        # encoding this string and hashing the resulting tokens would (hopefully) give us a unique identifier that
        # is specific for the BPE pre-tokenizer used by the model
        # we will use this unique identifier to write a "tokenizer.ggml.pre" entry in the GGUF file which we can
        # use in llama.cpp to implement the same pre-tokenizer

        chktxt = '\n \n\n \n\n\n \t \t\t \t\n  \n   \n    \n     \n🚀 (normal) 😶\u200d🌫️ (multiple emojis concatenated) ✅ 🦙🦙 3 33 333 3333 33333 333333 3333333 33333333 3.3 3..3 3...3 កាន់តែពិសេសអាច😁 ?我想在apple工作1314151天～ ------======= нещо на Български \'\'\'\'\'\'```````""""......!!!!!!?????? I\'ve been \'told he\'s there, \'RE you sure? \'M not sure I\'ll make it, \'D you like some tea? We\'Ve a\'lL'

        chktok = tokenizer.encode(chktxt)
        chkhsh = sha256(str(chktok).encode()).hexdigest()

        logger.debug(f"chktok: {chktok}")
        logger.debug(f"chkhsh: {chkhsh}")

        res = None

        # NOTE: if you get an error here, you need to update the convert_hf_to_gguf_update.py script
        #       or pull the latest version of the model from Huggingface
        #       don't edit the hashes manually!
        if chkhsh == "b6e8e1518dc4305be2fe39c313ed643381c4da5db34a98f6a04c093f8afbe99b":
            # ref: https://huggingface.co/THUDM/glm-4-9b-chat
            res = "chatglm-bpe"
        if chkhsh == "81d72c7348a9f0ebe86f23298d37debe0a5e71149e29bd283904c02262b27516":
            # ref: https://huggingface.co/THUDM/glm-4-9b-chat
            res = "chatglm-bpe"
        if chkhsh == "a1336059768a55c99a734006ffb02203cd450fed003e9a71886c88acf24fdbc2":
            # ref: https://huggingface.co/THUDM/glm-4-9b-hf
            res = "glm4"
        if chkhsh == "9ca2dd618e8afaf09731a7cf6e2105b373ba6a1821559f258b272fe83e6eb902":
            # ref: https://huggingface.co/zai-org/GLM-4.5-Air
            res = "glm4"
        if chkhsh == "1431a23e583c97432bc230bff598d103ddb5a1f89960c8f1d1051aaa944d0b35":
            # ref: https://huggingface.co/sapienzanlp/Minerva-7B-base-v1.0
            res = "minerva-7b"
        if chkhsh == "7e57df22b1fe23a7b1e1c7f3dc4e3f96d43a4eb0836d0c6bdc3436d7b2f1c664":
            # ref: https://huggingface.co/tencent/Hunyuan-A13B-Instruct
            res = "hunyuan"
        if chkhsh == "bba3b3366b646dbdded5dbc42d59598b849371afc42f7beafa914afaa5b70aa6":
            # ref: https://huggingface.co/tencent/Hunyuan-4B-Instruct
            res = "hunyuan-dense"
        if chkhsh == "a6b57017d60e6edb4d88ecc2845188e0eb333a70357e45dcc9b53964a73bbae6":
            # ref: https://huggingface.co/tiiuae/Falcon-H1-0.5B-Base
            res = "falcon-h1"
        if chkhsh == "60476e1243776c4fb1b993dbd7a5f15ac22f83c80afdf425fa5ae01c8d44ef86":
            # ref: https://huggingface.co/tiiuae/Falcon-H1-1B-Base
            res = "falcon-h1"
        if chkhsh == "3eda48b4c4dc7de733d1a8b3e3b4a85243dbbf704da2ee9d42c6beced8897896":
            # ref: https://huggingface.co/tiiuae/Falcon-H1-7B-Base
            res = "falcon-h1"
        if chkhsh == "48f8e02c0359c0bbdd82f26909171fac1c18a457bb47573ed1fe3bbb2c1cfd4b":
            # ref: https://huggingface.co/tiiuae/Falcon-H1-34B-Base
            res = "falcon-h1"
        if chkhsh == "81212dc7cdb7e0c1074ca62c5aeab0d43c9f52b8a737be7b12a777c953027890":
            # ref: https://huggingface.co/moonshotai/Kimi-K2-Base
            res = "kimi-k2"
        if chkhsh == "d4540891389ea895b53b399da6ac824becc30f2fba0e9ddbb98f92e55ca0e97c":
            # ref: https://huggingface.co/Qwen/Qwen3-Embedding-0.6B
            res = "qwen2"
        if chkhsh == "66b8d4e19ab16c3bfd89bce5d785fb7e0155e8648708a1f42077cb9fe002c273":
            # ref: https://huggingface.co/alvarobartt/grok-2-tokenizer
            res = "grok-2"
        if chkhsh == "0ef9807a4087ebef797fc749390439009c3b9eda9ad1a097abbe738f486c01e5":
            # ref: https://huggingface.co/meta-llama/Meta-Llama-3-8B
            res = "llama-bpe"
        if chkhsh == "049ecf7629871e3041641907f3de7c733e4dbfdc736f57d882ba0b0845599754":
            # ref: https://huggingface.co/deepseek-ai/deepseek-llm-7b-base
            res = "deepseek-llm"
        if chkhsh == "347715f544604f9118bb75ed199f68779f423cabb20db6de6f31b908d04d7821":
            # ref: https://huggingface.co/deepseek-ai/deepseek-coder-6.7b-base
            res = "deepseek-coder"
        if chkhsh == "8aeee3860c56296a157a1fe2fad249ec40aa59b1bb5709f4ade11c4e6fe652ed":
            # ref: https://huggingface.co/tiiuae/falcon-7b
            res = "falcon"
        if chkhsh == "0876d13b50744004aa9aeae05e7b0647eac9d801b5ba4668afc01e709c15e19f":
            # ref: https://huggingface.co/BAAI/bge-small-en-v1.5
            res = "bert-bge"
        if chkhsh == "9d032fcbd5501f4a38150912590928bfb36091efb5df11b8e2124b0390e3fb1e":
            # ref: https://huggingface.co/tiiuae/Falcon3-7B-Base
            res = "falcon3"
        if chkhsh == "8e62295832751ca1e8f92f2226f403dea30dc5165e448b5bfa05af5340c64ec7":
            # ref: https://huggingface.co/BAAI/bge-large-zh-v1.5
            res = "bert-bge-large"
        if chkhsh == "b6dc8df998e1cfbdc4eac8243701a65afe638679230920b50d6f17d81c098166":
            # ref: https://huggingface.co/mosaicml/mpt-7b
            res = "mpt"
        if chkhsh == "35d91631860c815f952d711435f48d356ebac988362536bed955d43bfa436e34":
            # ref: https://huggingface.co/bigcode/starcoder2-3b
            res = "starcoder"
        if chkhsh == "3ce83efda5659b07b1ad37ca97ca5797ea4285d9b9ab0dc679e4a720c9da7454":
            # ref: https://huggingface.co/openai-community/gpt2
            res = "gpt-2"
        if chkhsh == "32d85c31273f8019248f2559fed492d929ea28b17e51d81d3bb36fff23ca72b3":
            # ref: https://huggingface.co/stabilityai/stablelm-2-zephyr-1_6b
            res = "stablelm2"
        if chkhsh == "6221ad2852e85ce96f791f476e0b390cf9b474c9e3d1362f53a24a06dc8220ff":
            # ref: https://huggingface.co/smallcloudai/Refact-1_6-base
            res = "refact"
        if chkhsh == "9c2227e4dd922002fb81bde4fc02b0483ca4f12911410dee2255e4987644e3f8":
            # ref: https://huggingface.co/CohereForAI/c4ai-command-r-v01
            res = "command-r"
        if chkhsh == "e636dc30a262dcc0d8c323492e32ae2b70728f4df7dfe9737d9f920a282b8aea":
            # ref: https://huggingface.co/Qwen/Qwen1.5-7B
            res = "qwen2"
        if chkhsh == "b6dc8df998e1cfbdc4eac8243701a65afe638679230920b50d6f17d81c098166":
            # ref: https://huggingface.co/allenai/OLMo-1.7-7B-hf
            res = "olmo"
        if chkhsh == "a8594e3edff7c29c003940395316294b2c623e09894deebbc65f33f1515df79e":
            # ref: https://huggingface.co/databricks/dbrx-base
            res = "dbrx"
        if chkhsh == "c7699093ba4255a91e702aa38a596aa81669f3525dae06c2953267dde580f448":
            # ref: https://huggingface.co/jinaai/jina-reranker-v1-tiny-en
            res = "jina-v1-en"
        if chkhsh == "0876d13b50744004aa9aeae05e7b0647eac9d801b5ba4668afc01e709c15e19f":
            # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-en
            res = "jina-v2-en"
        if chkhsh == "171aeeedd6fb548d418a7461d053f11b6f1f1fc9b387bd66640d28a4b9f5c643":
            # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-es
            res = "jina-v2-es"
        if chkhsh == "27949a2493fc4a9f53f5b9b029c82689cfbe5d3a1929bb25e043089e28466de6":
            # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-de
            res = "jina-v2-de"
        if chkhsh == "c136ed14d01c2745d4f60a9596ae66800e2b61fa45643e72436041855ad4089d":
            # ref: https://huggingface.co/abacusai/Smaug-Llama-3-70B-Instruct
            res = "smaug-bpe"
        if chkhsh == "c7ea5862a53e4272c035c8238367063e2b270d51faa48c0f09e9d5b54746c360":
            # ref: https://huggingface.co/LumiOpen/Poro-34B-chat
            res = "poro-chat"
        if chkhsh == "7967bfa498ade6b757b064f31e964dddbb80f8f9a4d68d4ba7998fcf281c531a":
            # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-code
            res = "jina-v2-code"
        if chkhsh == "7fc505bd3104ca1083b150b17d088b59534ede9bde81f0dd2090967d7fe52cee":
            # ref: https://huggingface.co/LumiOpen/Viking-7B
            res = "viking"
        if chkhsh == "b53802fb28e26d645c3a310b34bfe07da813026ec7c7716883404d5e0f8b1901":
            # ref: https://huggingface.co/core42/jais-13b
            res = "jais"
        if chkhsh == "7b3e7548e4308f52a76e8229e4e6cc831195d0d1df43aed21ac6c93da05fec5f":
            # ref: https://huggingface.co/WisdomShell/CodeShell-7B
            res = "codeshell"
        if chkhsh == "63b97e4253352e6f357cc59ea5b583e3a680eaeaf2632188c2b952de2588485e":
            # ref: https://huggingface.co/mistralai/Mistral-Nemo-Base-2407
            res = "tekken"
        if chkhsh == "855059429035d75a914d1eda9f10a876752e281a054a7a3d421ef0533e5b6249":
            # ref: https://huggingface.co/HuggingFaceTB/SmolLM-135M
            res = "smollm"
        if chkhsh == "3c30d3ad1d6b64202cd222813e7736c2db6e1bd6d67197090fc1211fbc612ae7":
            # ref: https://huggingface.co/bigscience/bloom
            res = "bloom"
        if chkhsh == "bc01ce58980e1db43859146dc51b1758b3b88729b217a74792e9f8d43e479d21":
            # ref: https://huggingface.co/TurkuNLP/gpt3-finnish-small
            res = "gpt3-finnish"
        if chkhsh == "4e2b24cc4770243d65a2c9ec19770a72f08cffc161adbb73fcbb6b7dd45a0aae":
            # ref: https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct
            res = "exaone"
        if chkhsh == "fcace8b9cac38ce847670c970cd5892031a753a1ef381abd1d9af00f713da085":
            # ref: https://huggingface.co/microsoft/phi-2
            res = "phi-2"
        if chkhsh == "60824e3c0d9401f89943cbb2fff727f0e2d4c545ba4df2d6e4f09a6db0f5b450":
            # ref: https://huggingface.co/facebook/chameleon-7b
            res = "chameleon"
        if chkhsh == "8b5a93ed704057481f240da0be7e7dca721d7f8f4755263b6807227a2cbeae65":
            # ref: https://huggingface.co/sentence-transformers/stsb-roberta-base
            res = "roberta-bpe"
        if chkhsh == "ad851be1dba641f2e3711822f816db2c265f788b37c63b4e1aeacb9ee92de8eb":
            # ref: https://huggingface.co/ai-sage/GigaChat-20B-A3B-instruct
            res = "gigachat"
        if chkhsh == "d4c8f286ea6b520b3d495c4455483cfa2302c0cfcd4be05d781b6a8a0a7cdaf1":
            # ref: https://huggingface.co/Infinigence/Megrez-3B-Instruct
            res = "megrez"
        if chkhsh == "877081d19cf6996e2c4ff0e1236341e9b7bde288f5311a56a937f0afbbb3aeb5":
            # ref: https://huggingface.co/deepseek-ai/DeepSeek-V3
            res = "deepseek-v3"
        if chkhsh == "b3f499bb4255f8ca19fccd664443283318f2fd2414d5e0b040fbdd0cc195d6c5":
            # ref: https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B
            res = "deepseek-r1-qwen"
        if chkhsh == "ccc2ef013c104be7bae2965776d611e1d7a8a2a9c547dd93a682c9a9fc80352e":
            # ref: https://huggingface.co/Xenova/gpt-4o
            res = "gpt-4o"
        if chkhsh == "7dec86086fcc38b66b7bc1575a160ae21cf705be7718b9d5598190d7c12db76f":
            # ref: https://huggingface.co/UW/OLMo2-8B-SuperBPE-t180k
            res = "superbpe"
        if chkhsh == "1994ffd01900cfb37395608534236ecd63f2bd5995d6cb1004dda1af50240f15":
            # ref: https://huggingface.co/trillionlabs/Trillion-7B-preview
            res = "trillion"
        if chkhsh == "96a5f08be6259352137b512d4157e333e21df7edd3fcd152990608735a65b224":
            # ref: https://huggingface.co/inclusionAI/Ling-lite
            res = "bailingmoe"
        if chkhsh == "d353350c764d8c3b39c763113960e4fb4919bea5fbf208a0e3b22e8469dc7406":
            # ref: https://huggingface.co/meta-llama/Llama-4-Scout-17B-16E-Instruct
            res = "llama4"
        if chkhsh == "0e9433cbbb161f89e264eb32e8e64bfe69e834973ffca5d41d3948a604a3e2a3":
            # ref: https://huggingface.co/mistral-community/pixtral-12b
            res = "pixtral"
        if chkhsh == "d5f1dd6f980fec569fb218a81a7658ac45fc56b38c5a0adeb1c232fbe04ef5ec":
            # ref: https://huggingface.co/ByteDance-Seed/Seed-Coder-8B-Base
            res = "seed-coder"
        if chkhsh == "b0a6b1c0bd5998ebd9df08611efde34a4ff03faed45ae09c43e6b31ebd4b94cf":
            # ref: https://huggingface.co/skt/A.X-4.0
            res = "a.x-4.0"
        if chkhsh == "f6791d196f87ce6b56a7d234be618e0d58f8cda3549416635b2bebcd22cd95c4":
            # ref: https://huggingface.co/K-intelligence/Midm-2.0-Base-Instruct
            res = "midm-2.0"
        if chkhsh == "169bf0296a13c4d9b7672313f749eb36501d931022de052aad6e36f2bf34dd51":
            # ref: https://huggingface.co/LiquidAI/LFM2-Tokenizer
            res = "lfm2"
        if chkhsh == "2085e1638f6c377a0aa4ead21b27bb4cb941bf800df86ed391011769c1758dfb":
            # ref: https://huggingface.co/LGAI-EXAONE/EXAONE-4.0-32B
            res = "exaone4"
        if chkhsh == "a1e163ecab2e718a4c829d1148b6e86824ec36163bb71941c3dca9cd5ac25756":
            # ref: https://huggingface.co/JetBrains/Mellum-4b-base
            res = "mellum"
        if chkhsh == "9b1be57e70d20d9501b2b3186e792d81181ae36ada3903c26f9fea418cf87206":
            # ref: https://huggingface.co/inclusionAI/Ling-mini-base-2.0
            res = "bailingmoe2"
        if chkhsh == "53e325976a6e142379c19b09afcae354f2f496f147afa8f9e189a33fe4e3024e":
            # ref: https://huggingface.co/ibm-granite/granite-docling-258M
            res = "granite-docling"

        if res is None:
            logger.warning("\n")
            logger.warning("**************************************************************************************")
            logger.warning("** WARNING: The BPE pre-tokenizer was not recognized!")
            logger.warning("**          There are 2 possible reasons for this:")
            logger.warning("**          - the model has not been added to convert_hf_to_gguf_update.py yet")
            logger.warning("**          - the pre-tokenization config has changed upstream")
            logger.warning("**          Check your model files and convert_hf_to_gguf_update.py and update them accordingly.")
            logger.warning("** ref:     https://github.com/ggml-org/llama.cpp/pull/6920")
            logger.warning("**")
            logger.warning(f"** chkhsh:  {chkhsh}")
            logger.warning("**************************************************************************************")
            logger.warning("\n")
            raise NotImplementedError("BPE pre-tokenizer was not recognized - update get_vocab_base_pre()")

        logger.debug(f"tokenizer.ggml.pre: {repr(res)}")
        logger.debug(f"chkhsh: {chkhsh}")

        return res
    # Marker: End get_vocab_base_pre
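
    # To refresh the hash table above, run convert_hf_to_gguf_update.py (see the
    # marker comments); it recomputes chkhsh over chktxt for each known tokenizer
    # and rewrites this function in place, so the hashes are never edited by hand.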

    def _set_vocab_none(self) -> None:
        self.gguf_writer.add_tokenizer_model("none")

    def _set_vocab_gpt2(self) -> None:
        tokens, toktypes, tokpre = self.get_vocab_base()
        self.gguf_writer.add_tokenizer_model("gpt2")
        self.gguf_writer.add_tokenizer_pre(tokpre)
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=True)
        special_vocab.add_to_gguf(self.gguf_writer)

    def _set_vocab_qwen(self):
        dir_model = self.dir_model
        hparams = self.hparams
        tokens: list[str] = []
        toktypes: list[int] = []

        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(dir_model, trust_remote_code=True)
        vocab_size = hparams["vocab_size"]
        assert max(tokenizer.get_vocab().values()) < vocab_size

        tokpre = self.get_vocab_base_pre(tokenizer)

        merges = []
        vocab = {}
        mergeable_ranks = tokenizer.mergeable_ranks
        for token, rank in mergeable_ranks.items():
            vocab[QwenModel.token_bytes_to_string(token)] = rank
            if len(token) == 1:
                continue
            merged = QwenModel.bpe(mergeable_ranks, token, max_rank=rank)
            assert len(merged) == 2
            merges.append(' '.join(map(QwenModel.token_bytes_to_string, merged)))
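
        # each merges entry is the space-separated pair that forms a token, e.g. a
        # token "in" merged from "i" + "n" is recorded as "i n" (illustrative)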

        # for this kind of tokenizer, added_vocab is not a subset of vocab, so they need to be combined
        added_vocab = tokenizer.special_tokens
        reverse_vocab = {id_: encoded_tok for encoded_tok, id_ in {**vocab, **added_vocab}.items()}

        for i in range(vocab_size):
            if i not in reverse_vocab:
                tokens.append(f"[PAD{i}]")
                toktypes.append(gguf.TokenType.UNUSED)
            elif reverse_vocab[i] in added_vocab:
                tokens.append(reverse_vocab[i])
                toktypes.append(gguf.TokenType.CONTROL)
            else:
                tokens.append(reverse_vocab[i])
                toktypes.append(gguf.TokenType.NORMAL)

        self.gguf_writer.add_tokenizer_model("gpt2")
        self.gguf_writer.add_tokenizer_pre(tokpre)
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(dir_model, load_merges=False)
        special_vocab.merges = merges
        # only add special tokens when they were not already loaded from config.json
        if len(special_vocab.special_token_ids) == 0:
            special_vocab._set_special_token("bos", tokenizer.special_tokens["<|endoftext|>"])
            special_vocab._set_special_token("eos", tokenizer.special_tokens["<|endoftext|>"])
        # this one is usually not in config.json anyway
        special_vocab._set_special_token("unk", tokenizer.special_tokens["<|endoftext|>"])
        special_vocab.add_to_gguf(self.gguf_writer)

    def _set_vocab_sentencepiece(self, add_to_gguf=True):
        tokens, scores, toktypes = self._create_vocab_sentencepiece()

        self.gguf_writer.add_tokenizer_model("llama")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

    def _create_vocab_sentencepiece(self):
        from sentencepiece import SentencePieceProcessor

        tokenizer_path = self.dir_model / 'tokenizer.model'

        if not tokenizer_path.is_file():
            raise FileNotFoundError(f"File not found: {tokenizer_path}")

        tokenizer = SentencePieceProcessor()
        tokenizer.LoadFromFile(str(tokenizer_path))

        vocab_size = self.find_hparam([
            "vocab_size_per_layer_input",  # gemma3n
            "vocab_size",
        ], optional=True) or tokenizer.vocab_size()

        tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
        scores: list[float] = [-10000.0] * vocab_size
        toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size

        for token_id in range(tokenizer.vocab_size()):
            if token_id >= vocab_size:
                logger.warning(f'ignore tokens from {token_id}: id is out of range, max={vocab_size - 1}')
                break

            piece = tokenizer.IdToPiece(token_id)
            text = piece.encode("utf-8")
            score = tokenizer.GetScore(token_id)

            toktype = SentencePieceTokenTypes.NORMAL
            if tokenizer.IsUnknown(token_id):
                toktype = SentencePieceTokenTypes.UNKNOWN
            elif tokenizer.IsControl(token_id):
                toktype = SentencePieceTokenTypes.CONTROL
            elif tokenizer.IsUnused(token_id):
                toktype = SentencePieceTokenTypes.UNUSED
            elif tokenizer.IsByte(token_id):
                toktype = SentencePieceTokenTypes.BYTE

            tokens[token_id] = text
            scores[token_id] = score
            toktypes[token_id] = toktype

        added_tokens_file = self.dir_model / 'added_tokens.json'
        if added_tokens_file.is_file():
            with open(added_tokens_file, "r", encoding="utf-8") as f:
                added_tokens_json = json.load(f)
                for key in added_tokens_json:
                    token_id = added_tokens_json[key]
                    if token_id >= vocab_size:
                        logger.warning(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
                        continue

                    tokens[token_id] = key.encode("utf-8")
                    scores[token_id] = -1000.0
                    toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED

        tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
        if tokenizer_config_file.is_file():
            with open(tokenizer_config_file, "r", encoding="utf-8") as f:
                tokenizer_config_json = json.load(f)
                added_tokens_decoder = tokenizer_config_json.get("added_tokens_decoder", {})
                for token_id, token_data in added_tokens_decoder.items():
                    token_id = int(token_id)
                    token: str = token_data["content"]
                    if token_id >= vocab_size:
                        logger.warning(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
                        continue
                    if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
                        if tokens[token_id] != token.encode("utf-8"):
                            logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token!r}')
                    if token_data.get("special") or self.does_token_look_special(token):
                        toktypes[token_id] = SentencePieceTokenTypes.CONTROL
                    else:
                        token = token.replace(b"\xe2\x96\x81".decode("utf-8"), " ")  # pre-normalize user-defined spaces
                        toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED

                    scores[token_id] = -1000.0
                    tokens[token_id] = token.encode("utf-8")

        if vocab_size > len(tokens):
            pad_count = vocab_size - len(tokens)
            logger.debug(f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]")
            for i in range(1, pad_count + 1):
                tokens.append(bytes(f"[PAD{i}]", encoding="utf-8"))
                scores.append(-1000.0)
                toktypes.append(SentencePieceTokenTypes.UNUSED)

        return tokens, scores, toktypes

    def _set_vocab_llama_hf(self):
        vocab = gguf.LlamaHfVocab(self.dir_model)
        tokens = []
        scores = []
        toktypes = []

        for text, score, toktype in vocab.all_tokens():
            tokens.append(text)
            scores.append(score)
            toktypes.append(toktype)

        assert len(tokens) == vocab.vocab_size

        self.gguf_writer.add_tokenizer_model("llama")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

    def _set_vocab_rwkv_world(self):
        assert (self.dir_model / "rwkv_vocab_v20230424.txt").is_file()
        vocab_size = self.hparams.get("vocab_size", 65536)

        tokens: list[bytes] = ['<s>'.encode("utf-8")]
        toktypes: list[int] = [gguf.TokenType.CONTROL]

        with open(self.dir_model / "rwkv_vocab_v20230424.txt", "r", encoding="utf-8") as f:
            lines = f.readlines()
            for line in lines:
                parts = line.split(' ')
                assert len(parts) >= 3
                token, token_len = ast.literal_eval(' '.join(parts[1:-1])), int(parts[-1])
                token = token.encode("utf-8") if isinstance(token, str) else token
                assert isinstance(token, bytes)
                assert len(token) == token_len
                token_text: str = repr(token)[2:-1]  # "b'\xff'" -> "\xff"
                tokens.append(token_text.encode("utf-8"))
                toktypes.append(gguf.TokenType.NORMAL)
        remainder = vocab_size - len(tokens)
        assert remainder >= 0
        for i in range(len(tokens), vocab_size):
            tokens.append(f"[PAD{i}]".encode("utf-8"))
            toktypes.append(gguf.TokenType.UNUSED)

        self.gguf_writer.add_tokenizer_model("rwkv")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)
        special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=False)
        if special_vocab.chat_template is None:
            template_path = Path(__file__).parent / "models" / "templates" / "llama-cpp-rwkv-world.jinja"
            if template_path.is_file():
                with open(template_path, "r", encoding="utf-8") as f:
                    template = f.read()
            else:
                template = "rwkv-world"
            special_vocab.chat_template = template
        # hack: Add '\n\n' as the EOT token to make it chat normally
        special_vocab._set_special_token("eot", 261)
        # hack: Override these as they have already been set (incorrectly)
        special_vocab.special_token_ids["bos"] = 0
        special_vocab.special_token_ids["eos"] = 0

        special_vocab.add_to_gguf(self.gguf_writer)

    def _set_vocab_builtin(self, model_name: Literal["gpt-neox", "llama-spm"], vocab_size: int):
        tokenizer_path = Path(sys.path[0]) / "models" / f"ggml-vocab-{model_name}.gguf"
        logger.warning(f"Using tokenizer from '{os.path.relpath(tokenizer_path, os.getcwd())}'")
        vocab_reader = gguf.GGUFReader(tokenizer_path, "r")

        default_pre = "mpt" if model_name == "gpt-neox" else "default"

        field = vocab_reader.get_field(gguf.Keys.Tokenizer.MODEL)
        assert field  # tokenizer model
        self.gguf_writer.add_tokenizer_model(bytes(field.parts[-1]).decode("utf-8"))

        field = vocab_reader.get_field(gguf.Keys.Tokenizer.PRE)
        self.gguf_writer.add_tokenizer_pre(bytes(field.parts[-1]).decode("utf-8") if field else default_pre)

        field = vocab_reader.get_field(gguf.Keys.Tokenizer.LIST)
        assert field  # token list
        self.gguf_writer.add_token_list([bytes(field.parts[i]) for i in field.data][:vocab_size])

        if model_name == "llama-spm":
            field = vocab_reader.get_field(gguf.Keys.Tokenizer.SCORES)
            assert field  # token scores
            self.gguf_writer.add_token_scores([field.parts[i].tolist()[0] for i in field.data][:vocab_size])

        field = vocab_reader.get_field(gguf.Keys.Tokenizer.TOKEN_TYPE)
        assert field  # token types
        self.gguf_writer.add_token_types([field.parts[i].tolist()[0] for i in field.data][:vocab_size])

        if model_name != "llama-spm":
            field = vocab_reader.get_field(gguf.Keys.Tokenizer.MERGES)
            assert field  # token merges
            self.gguf_writer.add_token_merges([bytes(field.parts[i]) for i in field.data])

        if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.BOS_ID)) is not None:
            self.gguf_writer.add_bos_token_id(field.parts[-1].tolist()[0])
        if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.EOS_ID)) is not None:
            self.gguf_writer.add_eos_token_id(field.parts[-1].tolist()[0])
        if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.UNK_ID)) is not None:
            self.gguf_writer.add_unk_token_id(field.parts[-1].tolist()[0])
        if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.PAD_ID)) is not None:
            self.gguf_writer.add_pad_token_id(field.parts[-1].tolist()[0])
        if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.ADD_BOS)) is not None:
            self.gguf_writer.add_add_bos_token(field.parts[-1].tolist()[0])
        if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.ADD_EOS)) is not None:
            self.gguf_writer.add_add_eos_token(field.parts[-1].tolist()[0])

    def _try_set_pooling_type(self) -> None:
        # get pooling path
        pooling_path = None
        module_path = self.dir_model / "modules.json"
        if module_path.is_file():
            with open(module_path, encoding="utf-8") as f:
                modules = json.load(f)
            for mod in modules:
                if mod["type"] == "sentence_transformers.models.Pooling":
                    pooling_path = mod["path"]
                    break

        # get pooling type
        if pooling_path is not None:
            with open(self.dir_model / pooling_path / "config.json", encoding="utf-8") as f:
                pooling = json.load(f)
            if pooling["pooling_mode_mean_tokens"]:
                pooling_type = gguf.PoolingType.MEAN
            elif pooling["pooling_mode_cls_token"]:
                pooling_type = gguf.PoolingType.CLS
            elif pooling["pooling_mode_lasttoken"]:
                pooling_type = gguf.PoolingType.LAST
            else:
                raise NotImplementedError("Only MEAN, CLS, and LAST pooling types supported")
            self.gguf_writer.add_pooling_type(pooling_type)
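
    # A sentence-transformers modules.json typically looks like (illustrative):
    #   [{"idx": 0, "name": "0", "path": "", "type": "sentence_transformers.models.Transformer"},
    #    {"idx": 1, "name": "1", "path": "1_Pooling", "type": "sentence_transformers.models.Pooling"}]
    # in which case the pooling flags are read from 1_Pooling/config.json.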

    def _set_vocab_interns1(self):
        tokens: list[str] = []
        toktypes: list[int] = []

        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(self.dir_model, trust_remote_code=True)
        vocab = getattr(tokenizer, 'vocab', tokenizer.get_vocab())
        vocab_size = self.hparams.get("vocab_size", len(vocab))
        assert max(vocab.values()) < vocab_size

        tokpre = self.get_vocab_base_pre(tokenizer)

        reverse_vocab = {id_: encoded_tok for encoded_tok, id_ in vocab.items()}
        added_vocab = tokenizer.get_added_vocab()

        added_tokens_decoder = tokenizer.added_tokens_decoder

        for i in range(vocab_size):
            if i not in reverse_vocab:
                tokens.append(f"[PAD{i}]")
                toktypes.append(gguf.TokenType.UNUSED)
            else:
                token: str = reverse_vocab[i]
                if token in added_vocab:
                    # The tokenizer in llama.cpp assumes the CONTROL and USER_DEFINED tokens are pre-normalized.
                    # To avoid unexpected issues - we make sure to normalize non-normalized tokens
                    if not added_tokens_decoder[i].normalized:
                        previous_token = token
                        token = tokenizer.decode(tokenizer.encode(token, add_special_tokens=False))
                        if previous_token != token:
                            logger.info(f"{repr(previous_token)} is encoded and decoded back to {repr(token)} using AutoTokenizer")

                    if added_tokens_decoder[i].special or self.does_token_look_special(token):
                        toktypes.append(gguf.TokenType.CONTROL)
                    else:
                        toktypes.append(gguf.TokenType.USER_DEFINED)
                else:
                    toktypes.append(gguf.TokenType.NORMAL)
                tokens.append(token)

        self.gguf_writer.add_tokenizer_model("gpt2")
        self.gguf_writer.add_tokenizer_pre(tokpre)
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=True)
        special_vocab._set_special_token("bos", 151643)
        special_vocab.add_to_gguf(self.gguf_writer)


class MmprojModel(ModelBase):
    model_type = ModelType.MMPROJ
    model_arch = gguf.MODEL_ARCH.MMPROJ
    preprocessor_config: dict[str, Any]
    global_config: dict[str, Any]

    n_block_keys = ["n_layers", "num_hidden_layers", "n_layer", "num_layers", "depth"]

    has_vision_encoder: bool = True  # by default
    has_audio_encoder: bool = False

    # for models having multiple encoders, we need to separate their hparams
    hparams_vision: dict[str, Any] | None = None
    hparams_audio: dict[str, Any] | None = None

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.model_arch != gguf.MODEL_ARCH.MMPROJ:
            raise TypeError("MmprojModel must be subclassed with model_arch = gguf.MODEL_ARCH.MMPROJ")

        # get n_embd of the text model
        if not self.is_mistral_format:
            if "text_config" not in self.hparams:
                self.hparams["text_config"] = {}
            if "audio_config" not in self.hparams:
                self.hparams["audio_config"] = {}
            text_config = {**self.hparams, **self.hparams["text_config"]}
            self.n_embd_text = text_config.get("hidden_size", text_config.get("n_embd", 0))
        else:
            text_config = {
                k: v for k, v in self.hparams.items() if k not in ["vision_encoder", "audio_encoder"]
            }
            self.n_embd_text = text_config.get("hidden_dim", 0)

        assert self.n_embd_text > 0, "n_embd not found in hparams"

        # move vision config to the top level, while preserving the original hparams in global_config
        import copy
        self.global_config = copy.deepcopy(self.hparams)
        self.hparams_vision = self.get_vision_config()
        self.hparams_audio = self.get_audio_config()

        if self.hparams_vision is None and self.hparams_audio is None:
            raise ValueError("vision_config / audio_config not found in hparams")

        # for compat with vision-only models
        self.hparams = self.hparams_vision or self.hparams_audio or self.hparams

        # TODO @ngxson : this is a hack to support both vision and audio encoders
        have_multiple_encoders = self.has_audio_encoder and self.has_vision_encoder
        self.block_count = 128 if have_multiple_encoders else self.find_hparam(self.n_block_keys, True)
        self.tensor_map = gguf.get_tensor_name_map(gguf.MODEL_ARCH.MMPROJ, self.block_count)

        # load preprocessor config
        self.preprocessor_config = {}
        if not self.is_mistral_format:
            with open(self.dir_model / "preprocessor_config.json", "r", encoding="utf-8") as f:
                self.preprocessor_config = json.load(f)

    def get_vision_config(self) -> dict[str, Any] | None:
        config_name = "vision_config" if not self.is_mistral_format else "vision_encoder"
        return self.global_config.get(config_name)

    def get_audio_config(self) -> dict[str, Any] | None:
        return self.global_config.get("audio_config")

    def set_type(self):
        self.gguf_writer.add_type(gguf.GGUFType.MMPROJ)

    def prepare_metadata(self, vocab_only: bool):
        super().prepare_metadata(vocab_only=vocab_only)

        output_type: str = self.ftype.name.partition("_")[2]

        if self.fname_out.is_dir():
            fname_default: str = gguf.naming_convention(self.metadata.name, self.metadata.basename, self.metadata.finetune, self.metadata.version, size_label=None, output_type=output_type, model_type=None)
            self.fname_out = self.fname_out / f"mmproj-{fname_default}.gguf"
        else:
            self.fname_out = self.fname_out.parent / gguf.fill_templated_filename(self.fname_out.name, output_type)

    def set_gguf_parameters(self):
        self.gguf_writer.add_file_type(self.ftype)

        if self.has_vision_encoder:
            self.gguf_writer.add_clip_has_vision_encoder(True)
            self.gguf_writer.add_vision_projection_dim(self.n_embd_text)

            # vision config
            self.image_size = self.find_vparam(["image_size"])
            self.gguf_writer.add_vision_image_size(self.image_size)
            self.gguf_writer.add_vision_patch_size(self.find_vparam(["patch_size"]))
            self.gguf_writer.add_vision_embedding_length(self.find_vparam(["hidden_size"]))
            self.gguf_writer.add_vision_feed_forward_length(self.find_vparam(["intermediate_size"]))
            self.gguf_writer.add_vision_block_count(self.find_vparam(self.n_block_keys))
            self.gguf_writer.add_vision_head_count(self.find_vparam(["num_attention_heads"]))

            # preprocessor config
            image_mean = _MISTRAL_COMMON_DATASET_MEAN if self.is_mistral_format else self.preprocessor_config["image_mean"]
            image_std = _MISTRAL_COMMON_DATASET_STD if self.is_mistral_format else self.preprocessor_config["image_std"]

            self.gguf_writer.add_vision_image_mean(image_mean)
            self.gguf_writer.add_vision_image_std(image_std)

        if self.has_audio_encoder:
            self.gguf_writer.add_clip_has_audio_encoder(True)
            self.gguf_writer.add_audio_projection_dim(self.n_embd_text)

            # audio config
            self.gguf_writer.add_audio_embedding_length(self.find_aparam(["hidden_size"]))
            self.gguf_writer.add_audio_feed_forward_length(self.find_aparam(["intermediate_size"]))
            self.gguf_writer.add_audio_block_count(self.find_aparam(self.n_block_keys))
            self.gguf_writer.add_audio_head_count(self.find_aparam(["num_attention_heads"]))

        if not self.has_vision_encoder and not self.has_audio_encoder:
            raise ValueError("MmprojModel must have either vision or audio encoder")

    def write_vocab(self):
        raise ValueError("MmprojModel does not support vocab writing")

    def find_vparam(self, keys: Iterable[str], optional: bool = False) -> Any:
        assert self.hparams_vision is not None
        return self._find_param(self.hparams_vision, keys, optional)

    def find_aparam(self, keys: Iterable[str], optional: bool = False) -> Any:
        assert self.hparams_audio is not None
        return self._find_param(self.hparams_audio, keys, optional)

    def _find_param(self, obj: dict[str, Any], keys: Iterable[str], optional: bool = False) -> Any:
        key = next((k for k in keys if k in obj), None)
        if key is not None:
            return obj[key]
        if optional:
            return None
        raise KeyError(f"could not find any of: {keys}")

    def tensor_force_quant(self, name, new_name, bid, n_dims):
        del bid, name, n_dims  # unused

        if ".patch_embd.weight" in new_name:
            return gguf.GGMLQuantizationType.F16 if self.ftype == gguf.LlamaFileType.MOSTLY_F16 else gguf.GGMLQuantizationType.F32
        return False


@ModelBase.register("GPTNeoXForCausalLM")
class GPTNeoXModel(TextModel):
    model_arch = gguf.MODEL_ARCH.GPTNEOX

    def set_gguf_parameters(self):
        block_count = self.hparams["num_hidden_layers"]

        self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"])
        self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
        self.gguf_writer.add_rope_dimension_count(
            int(self.hparams["rotary_pct"] * (self.hparams["hidden_size"] // self.hparams["num_attention_heads"])),
        )
        self.gguf_writer.add_head_count(self.hparams["num_attention_heads"])
        self.gguf_writer.add_parallel_residual(self.hparams.get("use_parallel_residual", True))
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_eps"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads"))
        n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed"))

        tensors: list[tuple[str, Tensor]] = []

        if re.match(r"gpt_neox\.layers\.\d+\.attention\.query_key_value\.weight", name):
            # Map bloom-style qkv_linear to gpt-style qkv_linear
            # bloom: https://github.com/huggingface/transformers/blob/main/src/transformers/models/bloom/modeling_bloom.py#L238-L252  # noqa
            # gpt-2: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt2/modeling_gpt2.py#L312  # noqa
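            # Illustrative shape walk-through (e.g. n_head=8, n_embed=4096): the HF
            # weight is [3*n_embed, n_embed] with per-head Q/K/V interleaved; reshaping
            # to (n_head, 3, n_embed//n_head, n_embed) and concatenating along dim 0
            # regroups it into contiguous [Q; K; V] blocks.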
            qkv_weights = data_torch.reshape((n_head, 3, n_embed // n_head, n_embed))
            data_torch = torch.cat(
                (
                    qkv_weights[:, 0, :, :].reshape((-1, n_embed)),
                    qkv_weights[:, 1, :, :].reshape((-1, n_embed)),
                    qkv_weights[:, 2, :, :].reshape((-1, n_embed)),
                ),
                dim=0,
            )
            logger.info("re-format attention.linear_qkv.weight")
        elif re.match(r"gpt_neox\.layers\.\d+\.attention\.query_key_value\.bias", name):
            qkv_bias = data_torch.reshape((n_head, 3, n_embed // n_head))
            data_torch = torch.cat(
                (
                    qkv_bias[:, 0, :].reshape((n_embed,)),
                    qkv_bias[:, 1, :].reshape((n_embed,)),
                    qkv_bias[:, 2, :].reshape((n_embed,)),
                ),
                dim=0,
            )
            logger.info("re-format attention.linear_qkv.bias")

        tensors.append((self.map_tensor_name(name), data_torch))

        return tensors


@ModelBase.register("BloomForCausalLM", "BloomModel")
class BloomModel(TextModel):
    model_arch = gguf.MODEL_ARCH.BLOOM

    def set_gguf_parameters(self):
        n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed"))
        n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads"))
        self.gguf_writer.add_context_length(self.hparams.get("seq_length", n_embed))
        self.gguf_writer.add_embedding_length(n_embed)
        self.gguf_writer.add_feed_forward_length(4 * n_embed)
        self.gguf_writer.add_block_count(self.hparams["n_layer"])
        self.gguf_writer.add_head_count(n_head)
        self.gguf_writer.add_head_count_kv(n_head)
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads"))
        n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed"))

        name = re.sub(r'transformer\.', '', name)

        tensors: list[tuple[str, Tensor]] = []

        if re.match(r"h\.\d+\.self_attention\.query_key_value\.weight", name):
            # Map bloom-style qkv_linear to gpt-style qkv_linear
            # bloom: https://github.com/huggingface/transformers/blob/main/src/transformers/models/bloom/modeling_bloom.py#L238-L252  # noqa
            # gpt-2: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt2/modeling_gpt2.py#L312  # noqa
            qkv_weights = data_torch.reshape((n_head, 3, n_embed // n_head, n_embed))
            data_torch = torch.cat(
                (
                    qkv_weights[:, 0, :, :].reshape((-1, n_embed)),
                    qkv_weights[:, 1, :, :].reshape((-1, n_embed)),
                    qkv_weights[:, 2, :, :].reshape((-1, n_embed)),
                ),
                dim=0,
            )
            logger.info("re-format attention.linear_qkv.weight")
        elif re.match(r"h\.\d+\.self_attention\.query_key_value\.bias", name):
            qkv_bias = data_torch.reshape((n_head, 3, n_embed // n_head))
            data_torch = torch.cat(
                (
                    qkv_bias[:, 0, :].reshape((n_embed,)),
                    qkv_bias[:, 1, :].reshape((n_embed,)),
                    qkv_bias[:, 2, :].reshape((n_embed,)),
                ),
                dim=0,
            )
            logger.info("re-format attention.linear_qkv.bias")

        tensors.append((self.map_tensor_name(name), data_torch))

        return tensors
  1414. @ModelBase.register("MPTForCausalLM")
  1415. class MPTModel(TextModel):
  1416. model_arch = gguf.MODEL_ARCH.MPT
  1417. def set_vocab(self):
  1418. try:
  1419. self._set_vocab_gpt2()
  1420. except Exception:
  1421. # Fallback for SEA-LION model
  1422. self._set_vocab_sentencepiece()
  1423. self.gguf_writer.add_add_bos_token(False)
  1424. self.gguf_writer.add_pad_token_id(3)
  1425. self.gguf_writer.add_eos_token_id(1)
  1426. self.gguf_writer.add_unk_token_id(0)
  1427. def set_gguf_parameters(self):
  1428. block_count = self.hparams["n_layers"]
  1429. self.gguf_writer.add_context_length(self.hparams["max_seq_len"])
  1430. self.gguf_writer.add_embedding_length(self.hparams["d_model"])
  1431. self.gguf_writer.add_block_count(block_count)
  1432. self.gguf_writer.add_feed_forward_length(4 * self.hparams["d_model"])
  1433. self.gguf_writer.add_head_count(self.hparams["n_heads"])
  1434. if kv_n_heads := self.hparams["attn_config"].get("kv_n_heads"):
  1435. self.gguf_writer.add_head_count_kv(kv_n_heads)
  1436. self.gguf_writer.add_layer_norm_eps(1e-5)
  1437. if self.hparams["attn_config"]["clip_qkv"] is not None:
  1438. self.gguf_writer.add_clamp_kqv(self.hparams["attn_config"]["clip_qkv"])
  1439. if self.hparams["attn_config"]["alibi"]:
  1440. self.gguf_writer.add_max_alibi_bias(self.hparams["attn_config"]["alibi_bias_max"])
  1441. else:
  1442. self.gguf_writer.add_max_alibi_bias(0.0)
  1443. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  1444. del bid # unused
  1445. if "scales" in name:
  1446. new_name = self.map_tensor_name(name, try_suffixes=(".weight", ".bias", ".scales"))
  1447. new_name = new_name.replace("scales", "act.scales")
  1448. else:
  1449. new_name = self.map_tensor_name(name, try_suffixes=(".weight", ".bias"))
  1450. return [(new_name, data_torch)]
  1451. @ModelBase.register("OrionForCausalLM")
  1452. class OrionModel(TextModel):
  1453. model_arch = gguf.MODEL_ARCH.ORION
  1454. def set_vocab(self):
  1455. self._set_vocab_sentencepiece()
  1456. def set_gguf_parameters(self):
  1457. block_count = self.hparams["num_hidden_layers"]
  1458. head_count = self.hparams["num_attention_heads"]
  1459. head_count_kv = self.hparams.get("num_key_value_heads", head_count)
  1460. ctx_length = 0
  1461. if "max_sequence_length" in self.hparams:
  1462. ctx_length = self.hparams["max_sequence_length"]
  1463. elif "max_position_embeddings" in self.hparams:
  1464. ctx_length = self.hparams["max_position_embeddings"]
  1465. elif "model_max_length" in self.hparams:
  1466. ctx_length = self.hparams["model_max_length"]
  1467. else:
  1468. raise ValueError("gguf: can not find ctx length parameter.")
  1469. self.gguf_writer.add_file_type(self.ftype)
  1470. self.gguf_writer.add_tensor_data_layout("Meta AI original pth")
  1471. self.gguf_writer.add_context_length(ctx_length)
  1472. self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
  1473. self.gguf_writer.add_block_count(block_count)
  1474. self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
  1475. self.gguf_writer.add_head_count(head_count)
  1476. self.gguf_writer.add_head_count_kv(head_count_kv)
  1477. # note: config provides rms norm but it is actually layer norm
  1478. # ref: https://huggingface.co/OrionStarAI/Orion-14B-Chat/blob/276a17221ce42beb45f66fac657a41540e71f4f5/modeling_orion.py#L570-L571
  1479. self.gguf_writer.add_layer_norm_eps(self.hparams["rms_norm_eps"])
  1480. @ModelBase.register("BaichuanForCausalLM", "BaiChuanForCausalLM")
  1481. class BaichuanModel(TextModel):
  1482. model_arch = gguf.MODEL_ARCH.BAICHUAN
  1483. def set_vocab(self):
  1484. self._set_vocab_sentencepiece()
  1485. def set_gguf_parameters(self):
  1486. block_count = self.hparams["num_hidden_layers"]
  1487. head_count = self.hparams["num_attention_heads"]
  1488. head_count_kv = self.hparams.get("num_key_value_heads", head_count)
  1489. ctx_length = 0
  1490. if "max_sequence_length" in self.hparams:
  1491. ctx_length = self.hparams["max_sequence_length"]
  1492. elif "max_position_embeddings" in self.hparams:
  1493. ctx_length = self.hparams["max_position_embeddings"]
  1494. elif "model_max_length" in self.hparams:
  1495. ctx_length = self.hparams["model_max_length"]
  1496. else:
  1497. raise ValueError("gguf: can not find ctx length parameter.")
  1498. self.gguf_writer.add_tensor_data_layout("Meta AI original pth")
  1499. self.gguf_writer.add_context_length(ctx_length)
  1500. self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
  1501. self.gguf_writer.add_block_count(block_count)
  1502. self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
  1503. self.gguf_writer.add_rope_dimension_count(self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
  1504. self.gguf_writer.add_head_count(head_count)
  1505. self.gguf_writer.add_head_count_kv(head_count_kv)
  1506. self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
  1507. self.gguf_writer.add_file_type(self.ftype)
  1508. rope_scaling = self.hparams.get("rope_scaling") or {}
  1509. if rope_scaling.get("rope_type", rope_scaling.get("type")) == "linear" and "factor" in rope_scaling:
  1510. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
  1511. self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
  1512. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  1513. head_count = self.hparams["num_attention_heads"]
  1514. head_count_kv = self.hparams.get("num_key_value_heads", head_count)
  1515. tensors: list[tuple[str, Tensor]] = []
  1516. if bid is not None and name == f"model.layers.{bid}.self_attn.W_pack.weight":
  1517. logger.info(f"Unpacking and permuting layer {bid}")
  1518. tensors = [
  1519. (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid),
  1520. self._reverse_hf_permute_part(data_torch, 0, head_count, head_count)),
  1521. (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid),
  1522. self._reverse_hf_permute_part(data_torch, 1, head_count, head_count_kv)),
  1523. (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid),
  1524. self._reverse_hf_part(data_torch, 2)),
  1525. ]
  1526. else:
  1527. tensors = [(self.map_tensor_name(name), data_torch)]
  1528. return tensors
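    # W_pack stacks Q, K and V along dim 0, so each part is one third of the
    # rows. The HF export also permutes the rotary row pairs of Q and K;
    # _reverse_hf_permute below undoes that by making the two rotary halves
    # of each head contiguous again.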
    def _reverse_hf_permute(self, weights: Tensor, n_head: int, n_kv_head: int | None = None) -> Tensor:
        if n_kv_head is not None and n_head != n_kv_head:
            n_head //= n_kv_head

        return (
            weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
            .swapaxes(1, 2)
            .reshape(weights.shape)
        )

    def _reverse_hf_permute_part(
        self, weights: Tensor, n_part: int, n_head: int, n_head_kv: int | None = None,
    ) -> Tensor:
        r = weights.shape[0] // 3
        return self._reverse_hf_permute(weights[r * n_part:r * n_part + r, ...], n_head, n_head_kv)

    def _reverse_hf_part(self, weights: Tensor, n_part: int) -> Tensor:
        r = weights.shape[0] // 3
        return weights[r * n_part:r * n_part + r, ...]


@ModelBase.register("XverseForCausalLM")
class XverseModel(TextModel):
    model_arch = gguf.MODEL_ARCH.XVERSE

    def set_vocab(self):
        assert (self.dir_model / "tokenizer.json").is_file()
        dir_model = self.dir_model
        hparams = self.hparams

        tokens: list[bytes] = []
        toktypes: list[int] = []

        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(dir_model)
        vocab_size = hparams.get("vocab_size", len(tokenizer.vocab))
        # Since we are checking the maximum index, we need to ensure it's strictly less than vocab_size,
        # because vocab_size is the count of items, and indexes start at 0.
        max_vocab_index = max(tokenizer.get_vocab().values())
        if max_vocab_index >= vocab_size:
            raise ValueError("Maximum vocabulary index exceeds expected vocabulary size.")

        reverse_vocab: dict[int, str] = {id_: encoded_tok for encoded_tok, id_ in tokenizer.vocab.items()}
        added_vocab = tokenizer.get_added_vocab()

        for token_id in range(vocab_size):
            token_text = reverse_vocab[token_id].encode('utf-8')
            # replace "\x00" with a string of length > 0
            if token_text == b"\x00":
                toktype = gguf.TokenType.BYTE  # special
                token_text = f"<{token_text}>".encode('utf-8')
            elif re.fullmatch(br"<0x[0-9A-Fa-f]{2}>", token_text):
                toktype = gguf.TokenType.BYTE  # special
            elif reverse_vocab[token_id] in added_vocab:
                if tokenizer.added_tokens_decoder[token_id].special:
                    toktype = gguf.TokenType.CONTROL
                else:
                    toktype = gguf.TokenType.USER_DEFINED
            else:
                toktype = gguf.TokenType.NORMAL

            tokens.append(token_text)
            toktypes.append(toktype)

        self.gguf_writer.add_tokenizer_model("llama")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        block_count = self.hparams["num_hidden_layers"]
        head_count = self.hparams["num_attention_heads"]
        head_count_kv = self.hparams.get("num_key_value_heads", head_count)

        ctx_length = 0
        if "max_sequence_length" in self.hparams:
            ctx_length = self.hparams["max_sequence_length"]
        elif "max_position_embeddings" in self.hparams:
            ctx_length = self.hparams["max_position_embeddings"]
        elif "model_max_length" in self.hparams:
            ctx_length = self.hparams["model_max_length"]
        else:
            raise ValueError("gguf: cannot find ctx length parameter.")

        self.gguf_writer.add_tensor_data_layout("Meta AI original pth")
        self.gguf_writer.add_context_length(ctx_length)
        self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
        self.gguf_writer.add_rope_dimension_count(self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
        self.gguf_writer.add_head_count(head_count)
        self.gguf_writer.add_head_count_kv(head_count_kv)
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
        self.gguf_writer.add_file_type(self.ftype)

        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "linear" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        head_count = self.hparams["num_attention_heads"]
        head_count_kv = self.hparams.get("num_key_value_heads", head_count)

        # HF models permute some of the tensors, so we need to undo that
        if name.endswith("q_proj.weight"):
            data_torch = self._reverse_hf_permute(data_torch, head_count, head_count)
        if name.endswith("k_proj.weight"):
            data_torch = self._reverse_hf_permute(data_torch, head_count, head_count_kv)

        return [(self.map_tensor_name(name), data_torch)]

    def _reverse_hf_permute(self, weights: Tensor, n_head: int, n_kv_head: int | None = None) -> Tensor:
        if n_kv_head is not None and n_head != n_kv_head:
            n_head //= n_kv_head

        return (
            weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
            .swapaxes(1, 2)
            .reshape(weights.shape)
        )


@ModelBase.register("FalconForCausalLM", "RWForCausalLM")
class FalconModel(TextModel):
    model_arch = gguf.MODEL_ARCH.FALCON

    def set_gguf_parameters(self):
        block_count = self.hparams.get("num_hidden_layers")
        if block_count is None:
            block_count = self.hparams["n_layer"]  # old name

        n_head = self.hparams.get("num_attention_heads")
        if n_head is None:
            n_head = self.hparams["n_head"]  # old name

        n_head_kv = self.hparams.get("num_kv_heads")
        if n_head_kv is None:
            n_head_kv = self.hparams.get("n_head_kv", 1)  # old name

        self.gguf_writer.add_context_length(2048)  # not in config.json
        self.gguf_writer.add_tensor_data_layout("jploski")  # qkv tensor transform
        self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
        self.gguf_writer.add_feed_forward_length(4 * self.hparams["hidden_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_head_count(n_head)
        self.gguf_writer.add_head_count_kv(n_head_kv)
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        # QKV tensor transform
        # The original query_key_value tensor contains n_head_kv "kv groups",
        # each consisting of n_head/n_head_kv query weights followed by one key
        # and one value weight (shared by all query heads in the kv group).
        # This layout makes it a big pain to work with in GGML.
        # So we rearrange them here, so that we have n_head query weights
        # followed by n_head_kv key weights followed by n_head_kv value weights,
        # in contiguous fashion.
        # ref: https://github.com/jploski/ggml/blob/falcon40b/examples/falcon/convert-hf-to-ggml.py
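        # For example (illustrative values), with n_head=8 and n_head_kv=2 the
        # source rows are grouped as [q q q q k v | q q q q k v]; after the
        # transform below they read [q x 8 | k x 2 | v x 2].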
        if "query_key_value" in name:
            n_head = self.find_hparam(["num_attention_heads", "n_head"])
            n_head_kv = self.find_hparam(["num_kv_heads", "n_head_kv"], optional=True) or 1
            head_dim = self.hparams["hidden_size"] // n_head

            qkv = data_torch.view(n_head_kv, n_head // n_head_kv + 2, head_dim, head_dim * n_head)
            q = qkv[:, :-2].reshape(n_head * head_dim, head_dim * n_head)
            k = qkv[:, [-2]].reshape(n_head_kv * head_dim, head_dim * n_head)
            v = qkv[:, [-1]].reshape(n_head_kv * head_dim, head_dim * n_head)
            data_torch = torch.cat((q, k, v)).reshape_as(data_torch)

        return [(self.map_tensor_name(name), data_torch)]


@ModelBase.register("GPTBigCodeForCausalLM")
class StarCoderModel(TextModel):
    model_arch = gguf.MODEL_ARCH.STARCODER

    def set_gguf_parameters(self):
        block_count = self.hparams["n_layer"]

        self.gguf_writer.add_context_length(self.hparams["n_positions"])
        self.gguf_writer.add_embedding_length(self.hparams["n_embd"])
        self.gguf_writer.add_feed_forward_length(4 * self.hparams["n_embd"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_head_count(self.hparams["n_head"])
        self.gguf_writer.add_head_count_kv(1)
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)


@ModelBase.register("GPTRefactForCausalLM")
class RefactModel(TextModel):
    model_arch = gguf.MODEL_ARCH.REFACT

    def set_vocab(self):
        super().set_vocab()

        # TODO: how to determine special FIM tokens automatically?
        special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=False,
                                          special_token_types=['prefix', 'suffix', 'middle', 'eot'])
        special_vocab._set_special_token("prefix", 1)
        special_vocab._set_special_token("suffix", 3)
        special_vocab._set_special_token("middle", 2)
        special_vocab.chat_template = None  # do not add it twice
        special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
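        # The FFN size mirrors the LLaMA-style SwiGLU sizing: take 2/3 of the
        # usual 4*n_embd inner size, then round up to a multiple of 256.
        # For example (illustrative values), n_embd=4096 gives inner_dim=16384,
        # 2/3 of that is 10922, and rounding up yields ff_dim=11008.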
        hidden_dim = self.hparams["n_embd"]
        inner_dim = 4 * hidden_dim
        hidden_dim = int(2 * inner_dim / 3)
        multiple_of = 256
        ff_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of)

        block_count = self.hparams["n_layer"]

        # Refact uses ALiBi, so this context length comes from config.json and
        # may reflect the training configuration.
        self.gguf_writer.add_context_length(self.hparams["n_positions"])
        self.gguf_writer.add_embedding_length(self.hparams["n_embd"])
        self.gguf_writer.add_feed_forward_length(ff_dim)
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_head_count(self.hparams["n_head"])
        self.gguf_writer.add_head_count_kv(1)
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        hidden_dim = self.hparams["n_embd"]
        inner_dim = 4 * hidden_dim
        hidden_dim = int(2 * inner_dim / 3)
        multiple_of = 256
        ff_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of)
        n_head = self.hparams["n_head"]
        n_head_kv = 1
        head_dim = self.hparams["n_embd"] // n_head

        tensors: list[tuple[str, Tensor]] = []

        if bid is not None:
            if name == f"transformer.h.{bid}.attn.kv.weight":
                tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid), data_torch[:n_head_kv * head_dim]))
                tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid), data_torch[n_head_kv * head_dim:]))
            elif name == f"transformer.h.{bid}.attn.q.weight":
                tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid), data_torch))
            elif name == f"transformer.h.{bid}.mlp.gate_up_proj.weight":
                tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE, bid), data_torch[:ff_dim]))
                tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP, bid), data_torch[ff_dim:]))

        if len(tensors) == 0:
            tensors.append((self.map_tensor_name(name), data_torch))

        return tensors


@ModelBase.register("StableLmForCausalLM", "StableLMEpochForCausalLM", "LlavaStableLMEpochForCausalLM")
class StableLMModel(TextModel):
    model_arch = gguf.MODEL_ARCH.STABLELM

    def set_vocab(self):
        if (self.dir_model / "tokenizer.json").is_file():
            self._set_vocab_gpt2()
        else:
            # StableLM 2 1.6B used to have a vocab in a similar format to Qwen's vocab
            self._set_vocab_qwen()

    def set_gguf_parameters(self):
        hparams = self.hparams
        block_count = hparams["num_hidden_layers"]

        self.gguf_writer.add_context_length(hparams["max_position_embeddings"])
        self.gguf_writer.add_embedding_length(hparams["hidden_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
        rotary_factor = self.find_hparam(["partial_rotary_factor", "rope_pct"])
        self.gguf_writer.add_rope_dimension_count(int(rotary_factor * (hparams["hidden_size"] // hparams["num_attention_heads"])))
        self.gguf_writer.add_head_count(hparams["num_attention_heads"])
        self.gguf_writer.add_head_count_kv(hparams["num_key_value_heads"])
        self.gguf_writer.add_parallel_residual(hparams["use_parallel_residual"] if "use_parallel_residual" in hparams else True)
        self.gguf_writer.add_layer_norm_eps(self.find_hparam(["layer_norm_eps", "norm_eps"]))
        self.gguf_writer.add_file_type(self.ftype)

    _q_norms: list[dict[str, Tensor]] | None = None
    _k_norms: list[dict[str, Tensor]] | None = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        n_head = self.hparams["num_attention_heads"]
        n_kv_head = self.hparams["num_key_value_heads"]

        if name.find("q_layernorm.norms") != -1:
            assert bid is not None

            if self._q_norms is None:
                self._q_norms = [{} for _ in range(self.block_count)]

            self._q_norms[bid][name] = data_torch

            if len(self._q_norms[bid]) >= n_head:
                return self._stack_qk_norm(bid, n_head, self._q_norms[bid], "q_layernorm")
            else:
                return []

        if name.find("k_layernorm.norms") != -1:
            assert bid is not None

            if self._k_norms is None:
                self._k_norms = [{} for _ in range(self.block_count)]

            self._k_norms[bid][name] = data_torch

            if len(self._k_norms[bid]) >= n_kv_head:
                return self._stack_qk_norm(bid, n_kv_head, self._k_norms[bid], "k_layernorm")
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]
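    # StableLM stores one small norm weight per attention head; GGUF expects a
    # single stacked tensor per layer, so the per-head weights buffered above
    # are combined into one (n_head, head_dim) tensor once all heads arrive.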
    def _stack_qk_norm(self, bid: int, n_head: int, norms: dict[str, Tensor], layer_name: str = "q_layernorm"):
        datas: list[Tensor] = []
        # extract the norms in order
        for xid in range(n_head):
            ename = f"model.layers.{bid}.self_attn.{layer_name}.norms.{xid}.weight"
            datas.append(norms[ename])
            del norms[ename]
        data_torch = torch.stack(datas, dim=0)

        merged_name = f"model.layers.{bid}.self_attn.{layer_name}.weight"
        new_name = self.map_tensor_name(merged_name)

        return [(new_name, data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._q_norms is not None or self._k_norms is not None:
            # flatten two `list[dict[str, Tensor]]` into a single `list[str]`
            norms = (
                [k for d in self._q_norms for k in d.keys()] if self._q_norms is not None else []
            ) + (
                [k for d in self._k_norms for k in d.keys()] if self._k_norms is not None else []
            )
            if len(norms) > 0:
                raise ValueError(f"Unprocessed norms: {norms}")


@ModelBase.register(
    "LLaMAForCausalLM",
    "LlamaForCausalLM",
    "MistralForCausalLM",
    "MixtralForCausalLM",
    "VLlama3ForCausalLM",
    "LlavaForConditionalGeneration",
    "VoxtralForConditionalGeneration",
    "LlamaModel")
class LlamaModel(TextModel):
    model_arch = gguf.MODEL_ARCH.LLAMA
    undo_permute = True

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # fix for SmolVLM2, missing `num_attention_heads` in config.json
        if self.hf_arch == "VLlama3ForCausalLM":
            self.hparams["num_attention_heads"] = self.hparams.get("num_attention_heads", 32)

    def _set_vocab_mistral(self):
        if not _mistral_common_installed:
            raise ImportError(_mistral_import_error_msg)

        vocab = MistralVocab(self.dir_model)
        logger.info(
            f"Converting tokenizer {vocab.tokenizer_type} of size {vocab.vocab_size}."
        )

        self.gguf_writer.add_tokenizer_model(vocab.gguf_tokenizer_model)

        tokens = []
        scores = []
        toktypes = []

        for text, score, toktype in vocab.all_tokens():
            tokens.append(text)
            scores.append(score)
            toktypes.append(toktype)

        assert len(tokens) == vocab.vocab_size, (
            f"token count ({len(tokens)}) != vocab size ({vocab.vocab_size})"
        )

        if vocab.tokenizer_type == MistralTokenizerType.tekken:
            self.gguf_writer.add_tokenizer_pre("tekken")
            self.gguf_writer.add_token_merges(
                vocab.extract_vocab_merges_from_model()
            )

        logger.info(
            f"Setting bos, eos, unk and pad token IDs to {vocab.bos_id}, {vocab.eos_id}, {vocab.unk_id}, {vocab.pad_id}."
        )

        self.gguf_writer.add_bos_token_id(vocab.bos_id)
        self.gguf_writer.add_eos_token_id(vocab.eos_id)
        self.gguf_writer.add_unk_token_id(vocab.unk_id)
        self.gguf_writer.add_pad_token_id(vocab.pad_id)

        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)
        self.gguf_writer.add_vocab_size(vocab.vocab_size)

        self.gguf_writer.add_add_bos_token(True)
        self.gguf_writer.add_add_eos_token(False)

        template_dir = Path(__file__).parent / "models/templates/"

        if not self.is_mistral_format or not self.disable_mistral_community_chat_template:
            # Log only for Mistral format that the official tokenization and detokenization is via `mistral-common`.
            if self.is_mistral_format:
                logger.info(
                    "Using a Mistral community chat template. These templates may contain errors in the days or weeks following a release. "
                    "Mistral recommends using `mistral-common` for tokenization and detokenization."
                )
            template = MistralModel.get_community_chat_template(vocab, template_dir, self.is_mistral_format)
            self.gguf_writer.add_chat_template(template)
        else:
            logger.info("Not using a Mistral community chat template. Make sure to perform tokenization and detokenization via `mistral-common`.")

    def set_vocab(self):
        if self.is_mistral_format:
            return self._set_vocab_mistral()

        path_tekken_json = self.dir_model / "tekken.json"
        path_tokenizer_json = self.dir_model / "tokenizer.json"
        if path_tekken_json.is_file() and not path_tokenizer_json.is_file():
            return self._set_vocab_mistral()

        try:
            self._set_vocab_sentencepiece()
        except FileNotFoundError:
            try:
                self._set_vocab_llama_hf()
            except (FileNotFoundError, TypeError):
                # Llama 3
                self._set_vocab_gpt2()

        # Apply to CodeLlama only (and ignore for Llama 3 with a vocab size of 128256)
        if self.hparams.get("vocab_size", 32000) == 32016:
            special_vocab = gguf.SpecialVocab(
                self.dir_model, load_merges=False,
                special_token_types=['prefix', 'suffix', 'middle', 'eot']
            )
            special_vocab._set_special_token("prefix", 32007)
            special_vocab._set_special_token("suffix", 32008)
            special_vocab._set_special_token("middle", 32009)
            special_vocab._set_special_token("eot", 32010)
            special_vocab.add_to_gguf(self.gguf_writer)

        tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
        if tokenizer_config_file.is_file():
            with open(tokenizer_config_file, "r", encoding="utf-8") as f:
                tokenizer_config_json = json.load(f)
                if "add_prefix_space" in tokenizer_config_json:
                    self.gguf_writer.add_add_space_prefix(tokenizer_config_json["add_prefix_space"])

        # Apply to granite small models only
        if self.hparams.get("vocab_size", 32000) == 49152:
            self.gguf_writer.add_add_bos_token(False)

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        hparams = self.hparams

        if not self.is_mistral_format:
            self.gguf_writer.add_vocab_size(hparams["vocab_size"])

        if (rope_dim := hparams.get("head_dim")) is None:
            rope_dim = hparams["hidden_size"] // hparams["num_attention_heads"]
        self.gguf_writer.add_rope_dimension_count(rope_dim)

        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "linear" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
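    # The HF Llama export permutes the rows of the Q and K projections for its
    # rotary-embedding implementation; llama.cpp expects the original layout,
    # in which the two rotary halves of each head are contiguous. `permute`
    # regroups the rows back (for K, n_head is replaced by the KV head count).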
    @staticmethod
    def permute(weights: Tensor, n_head: int, n_head_kv: int | None):
        if n_head_kv is not None and n_head != n_head_kv:
            n_head = n_head_kv
        return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
                .swapaxes(1, 2)
                .reshape(weights.shape))

    _experts: list[dict[str, Tensor]] | None = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        n_head = self.find_hparam(["n_heads", "num_attention_heads"])
        n_kv_head = self.find_hparam(["n_kv_heads", "num_key_value_heads"])

        vision_prefixes = [
            "vision_encoder.",
            "vision_language_adapter.",
            "patch_merger.",
            "pre_mm_projector_norm",
        ]

        is_multimodal_tensor = "vision_tower" in name \
            or "vision_model" in name \
            or "audio_tower" in name \
            or "model.connector" in name \
            or "multi_modal_projector" in name \
            or any(
                name.startswith(prefix)
                for prefix in vision_prefixes
            )

        if is_multimodal_tensor:
            return []  # skip vision tensors
        elif self.hf_arch == "LlamaModel":
            name = "model." + name
        elif name.startswith("model.text_model"):
            name = name.replace("text_model.", "")  # for SmolVLM
        elif name.startswith("language_model."):
            name = name.replace("language_model.", "")  # for the rest

        if self.undo_permute:
            if name.endswith(("q_proj.weight", "q_proj.bias")):
                data_torch = LlamaModel.permute(data_torch, n_head, n_head)
            if name.endswith(("k_proj.weight", "k_proj.bias")):
                data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)

        # process the experts separately
        if name.find("block_sparse_moe.experts") != -1:
            n_experts = self.hparams["num_local_experts"]
            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for wid in ["w1", "w2", "w3"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.{wid}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)

                    merged_name = f"layers.{bid}.feed_forward.experts.{wid}.weight"

                    new_name = self.map_tensor_name(merged_name)

                    tensors.append((new_name, data_torch))
                return tensors
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]

    def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
        if rope_scaling := self.find_hparam(["rope_scaling"], optional=True):
            if rope_scaling.get("rope_type", '').lower() == "llama3":
                base = self.hparams.get("rope_theta", 10000.0)
                if (dim := self.hparams.get("head_dim")) is None:
                    dim = self.hparams["hidden_size"] // self.hparams["num_attention_heads"]
                freqs = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim))

                factor = rope_scaling.get("factor", 8.0)
                low_freq_factor = rope_scaling.get("low_freq_factor", 1.0)
                high_freq_factor = rope_scaling.get("high_freq_factor", 4.0)
                old_context_len = self.hparams.get("original_max_position_embeddings", 8192)

                low_freq_wavelen = old_context_len / low_freq_factor
                high_freq_wavelen = old_context_len / high_freq_factor
                # assert low_freq_wavelen != high_freq_wavelen  # Errors for Llama4
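                # Llama 3 rope scaling is frequency-dependent: dimensions whose
                # wavelength is shorter than high_freq_wavelen keep factor 1,
                # those longer than low_freq_wavelen are scaled by `factor`,
                # and the band in between is smoothly interpolated.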
                rope_factors = []
                for freq in freqs:
                    wavelen = 2 * math.pi / freq
                    if wavelen < high_freq_wavelen:
                        rope_factors.append(1)
                    elif wavelen > low_freq_wavelen:
                        rope_factors.append(factor)
                    else:
                        smooth = (old_context_len / wavelen - low_freq_factor) / (high_freq_factor - low_freq_factor)
                        rope_factors.append(1 / ((1 - smooth) / factor + smooth))

                yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FREQS), torch.tensor(rope_factors, dtype=torch.float32))

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")


@ModelBase.register("ArceeForCausalLM")
class ArceeModel(LlamaModel):
    model_arch = gguf.MODEL_ARCH.ARCEE

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self._try_set_pooling_type()
        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
            self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"])


@ModelBase.register(
    "LlavaForConditionalGeneration",  # pixtral
    "Mistral3ForConditionalGeneration",  # mistral small 3.1
)
class LlavaVisionModel(MmprojModel):
    img_break_tok_id = -1

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.hparams.get("model_type") == "pixtral":
            # layer_norm_eps is not in config.json, it is hard-coded in modeling_pixtral.py
            self.hparams["layer_norm_eps"] = self.hparams.get("layer_norm_eps", 1e-5)
            self.img_break_tok_id = self.get_token_id("[IMG_BREAK]")
        elif self.is_mistral_format:
            # hparams is already the vision config here, so norm_eps is only defined in global_config.
            self.hparams["norm_eps"] = self.global_config.get("norm_eps", None)
            assert self.hparams["norm_eps"] is not None, "norm_eps not found in params.json"
            self.img_break_tok_id = self.find_vparam(["image_break_token_id"])
        else:
            raise ValueError(f"Unsupported model type: {self.hparams['model_type']}")
        logger.info(f"Image break token id: {self.img_break_tok_id}")

    def get_token_id(self, token: str) -> int:
        tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
        with open(tokenizer_config_file, "r", encoding="utf-8") as f:
            added_tokens_decoder = json.load(f)['added_tokens_decoder']
            for id_, token_data in added_tokens_decoder.items():
                if token_data["content"] == token:
                    return int(id_)
        raise ValueError(f"Token '{token}' not found in tokenizer config.")

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        hparams = self.hparams
        if hparams.get("model_type") == "pixtral":
            self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.PIXTRAL)
            self.gguf_writer.add_vision_attention_layernorm_eps(hparams["layer_norm_eps"])

            # hidden_act
            if hparams["hidden_act"] == "silu":
                self.gguf_writer.add_vision_use_silu(True)
            elif hparams["hidden_act"] == "gelu":
                self.gguf_writer.add_vision_use_gelu(True)
            else:
                raise ValueError(f"Unsupported hidden_act: {hparams['hidden_act']}")

        # spatial_merge_size
        if "spatial_merge_size" in self.global_config:
            self.gguf_writer.add_vision_spatial_merge_size(self.global_config["spatial_merge_size"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused
        n_head = (
            self.hparams["num_attention_heads"] if not self.is_mistral_format else self.find_vparam(["num_attention_heads"])
        )
        n_kv_head = n_head

        valid_prefixes = (
            "multi_modal_projector.",
            "vision_tower.",
            "vision_encoder.",
            "vision_language_adapter.",
            "patch_merger.",
            "pre_mm_projector_norm",
        )

        if any(name.startswith(prefix) for prefix in valid_prefixes):
            # process vision tensors
            if name.endswith(("q_proj.weight", "q_proj.bias")) and not self.is_mistral_format:
                data_torch = LlamaModel.permute(data_torch, n_head, n_head)
            if name.endswith(("k_proj.weight", "k_proj.bias")) and not self.is_mistral_format:
                data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)
            return [(self.map_tensor_name(name), data_torch)]

        embed_key = "embed_tokens.weight" if not self.is_mistral_format else "tok_embeddings.weight"
        if self.img_break_tok_id > 0 and embed_key in name:
            logger.info(f"Extracting [IMG_BREAK] token embedding from {name}")
            # for the pixtral model, we need to extract the [IMG_BREAK] token embedding
            img_break_embd = data_torch[self.img_break_tok_id]
            name = gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.V_TOK_EMBD_IMG_BREAK]
            return [(self.map_tensor_name(name), img_break_embd)]

        return []  # skip other tensors


@ModelBase.register("Idefics3ForConditionalGeneration", "SmolVLMForConditionalGeneration")
class SmolVLMModel(MmprojModel):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.hparams["model_type"] == "smolvlm_vision":
            # fix for SmolVLM2, missing some keys in config.json
            # default values are taken from transformers code
            self.hparams["hidden_size"] = self.hparams.get("hidden_size", 1152)
            self.hparams["num_attention_heads"] = self.hparams.get("num_attention_heads", 16)
            self.hparams["intermediate_size"] = self.hparams.get("intermediate_size", 3072)

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.IDEFICS3)
        self.gguf_writer.add_vision_attention_layernorm_eps(self.hparams.get("layer_norm_eps", 1e-5))
        self.gguf_writer.add_vision_projector_scale_factor(self.global_config.get("scale_factor", 2))
        self.gguf_writer.add_vision_use_gelu(True)

        # Add the preprocessor longest edge size
        preproc_image_size = self.preprocessor_config.get("size", {}).get("longest_edge", self.image_size)
        self.gguf_writer.add_vision_preproc_image_size(preproc_image_size)

    def tensor_force_quant(self, name, new_name, bid, n_dims):
        if ".embeddings." in name:
            return gguf.GGMLQuantizationType.F32
        return super().tensor_force_quant(name, new_name, bid, n_dims)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused
        is_vision_tensor = "vision_tower" in name or "vision_model" in name or "model.connector" in name

        if is_vision_tensor:
            return [(self.map_tensor_name(name), data_torch)]

        return []  # skip other tensors


@ModelBase.register(
    "Llama4ForConditionalGeneration",
    "Llama4ForCausalLM",
)
class Llama4Model(LlamaModel):
    model_arch = gguf.MODEL_ARCH.LLAMA4
    undo_permute = False

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # IMPORTANT: the normal "intermediate_size" is renamed to "intermediate_size_mlp", we need to undo this
        self.hparams["intermediate_size_moe"] = self.hparams["intermediate_size"]
        self.hparams["intermediate_size"] = self.hparams["intermediate_size_mlp"]

    def set_vocab(self):
        self._set_vocab_gpt2()

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_interleave_moe_layer_step(self.hparams["interleave_moe_layer_step"])
        self.gguf_writer.add_expert_feed_forward_length(self.hparams["intermediate_size_moe"])
        if "layer_types" in self.hparams:
            if all(lt == "full_attention" for lt in self.hparams["layer_types"]):
                # all layers are full attention (for MobileLLM), disable swa
                self.gguf_writer.add_sliding_window(0)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None):
        if name.startswith("language_model."):
            name = name.replace("language_model.", "")

        # split the gate_up into gate and up
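        # The fused gate_up_proj stores gate and up concatenated along its last
        # dimension; transposing first turns each half into a standard
        # (n_out, n_in) projection matrix before the split.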
        if "gate_up_proj" in name:
            name_up = name.replace("gate_up_proj", "up_proj.weight")
            name_gate = name.replace("gate_up_proj", "gate_proj.weight")
            dim_half = data_torch.shape[-1] // 2
            gate_proj_weight, up_proj_weight = data_torch.transpose(-1, -2).split(dim_half, dim=-2)
            return [
                (self.map_tensor_name(name_gate), gate_proj_weight),
                (self.map_tensor_name(name_up), up_proj_weight)
            ]

        if name.endswith("down_proj"):
            name += ".weight"
            data_torch = data_torch.transpose(-1, -2)

        if "multi_modal_projector" in name or "vision_model" in name:
            return []
        return super().modify_tensors(data_torch, name, bid)


@ModelBase.register("Llama4ForConditionalGeneration")
class Llama4VisionModel(MmprojModel):
    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.LLAMA4)
        self.gguf_writer.add_vision_attention_layernorm_eps(self.hparams["norm_eps"])
        self.gguf_writer.add_vision_projector_scale_factor(int(1.0 / self.hparams["pixel_shuffle_ratio"]))
        assert self.hparams["hidden_act"] == "gelu"
        self.gguf_writer.add_vision_use_gelu(True)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused
        if "multi_modal_projector" in name or "vision_model" in name:
            # process vision tensors
            if "positional_embedding_vlm" in name and ".weight" not in name:
                name += ".weight"
            if "multi_modal_projector.linear_1" in name:
                # despite the numeric suffix in the name, this is a single fully connected layer
                return [(gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.V_MMPROJ_FC] + '.weight', data_torch)]
            return [(self.map_tensor_name(name), data_torch)]
        return []


@ModelBase.register("Mistral3ForConditionalGeneration")
class Mistral3Model(LlamaModel):
    model_arch = gguf.MODEL_ARCH.LLAMA

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None):
        name = name.replace("language_model.", "")
        if "multi_modal_projector" in name or "vision_tower" in name:
            return []
        return super().modify_tensors(data_torch, name, bid)


@ModelBase.register("DeciLMForCausalLM")
class DeciModel(TextModel):
    model_arch = gguf.MODEL_ARCH.DECI

    @staticmethod
    def _ffn_mult_to_intermediate_size(ffn_mult: float, n_embd: int) -> int:
        # DeciLM-specific code
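        # e.g. (illustrative values) ffn_mult=1.3, n_embd=4096:
        # int(2 * 1.3 * 4096 / 3) = 3549, rounded up to the next multiple of 256 -> 3584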
        intermediate_size = int(2 * ffn_mult * n_embd / 3)
        return DeciModel._find_multiple(intermediate_size, 256)

    @staticmethod
    def _find_multiple(n: int, k: int) -> int:
        # DeciLM-specific code
        if n % k == 0:
            return n
        return n + k - (n % k)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if "block_configs" in self.hparams:  # Llama-3_1-Nemotron-51B
            _block_configs: list[dict[str, Any]] = self.hparams["block_configs"]
            assert self.block_count == len(_block_configs)
            self._num_kv_heads = list()
            self._num_heads = list()
            _ffn_multipliers = list()
            # ***linear attention layer***
            # if n_heads_in_group is None and replace_with_linear is True
            # then _num_kv_heads[il] is 0 and _num_heads[il] is num_attention_heads
            # ***attention-free layer***
            # if n_heads_in_group is None and replace_with_linear is False
            # then _num_kv_heads[il] is 0 and _num_heads[il] is 0
            # ***normal attention layer***
            # if n_heads_in_group is not None, then
            # _num_kv_heads[il] is num_attention_heads // n_heads_in_group and
            # _num_heads[il] is num_attention_heads
            # ***dummy layer*** for nemotron 253B
            # if n_heads_in_group is None and ffn_mult is None
            # then _num_kv_heads[il] is 0 and _num_heads[il] is 0 and _ffn_dims is 0
            for il in range(len(_block_configs)):
                if _block_configs[il]["attention"]["n_heads_in_group"] is None:
                    if _block_configs[il]["attention"]["replace_with_linear"] is True:
                        self._num_kv_heads.append(0)
                        self._num_heads.append(self.hparams["num_attention_heads"])
                    else:
                        self._num_kv_heads.append(0)
                        self._num_heads.append(0)
                else:
                    self._num_kv_heads.append(self.hparams["num_attention_heads"] // _block_configs[il]["attention"]["n_heads_in_group"])
                    self._num_heads.append(self.hparams["num_attention_heads"])
                if _block_configs[il]["ffn"]["ffn_mult"] is None:  # dummy layer
                    _ffn_multipliers.append(0.0)
                else:
                    _ffn_multipliers.append(_block_configs[il]["ffn"]["ffn_mult"])
            assert self.block_count == len(self._num_kv_heads)
            assert self.block_count == len(self._num_heads)
            assert self.block_count == len(_ffn_multipliers)
            assert isinstance(self._num_kv_heads, list) and isinstance(self._num_kv_heads[0], int)
            assert isinstance(self._num_heads, list) and isinstance(self._num_heads[0], int)
            assert isinstance(_ffn_multipliers, list) and isinstance(_ffn_multipliers[0], float)
            self._ffn_dims: list[int] = [
                DeciModel._ffn_mult_to_intermediate_size(multiplier, self.hparams["hidden_size"])
                for multiplier in _ffn_multipliers
            ]

    def set_vocab(self):
        # Please change the eos_token in tokenizer_config.json of Llama-3_1-Nemotron-51B
        # from '|eot_id|' to '|end_of_text|'
        if self.hparams.get("vocab_size", 128256) == 128256:
            tokens, toktypes, tokpre = self.get_vocab_base()
            self.gguf_writer.add_tokenizer_model("gpt2")
            self.gguf_writer.add_tokenizer_pre(tokpre)
            self.gguf_writer.add_token_list(tokens)
            self.gguf_writer.add_token_types(toktypes)

            special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=True)
            special_vocab.add_to_gguf(self.gguf_writer)
        else:
            # DeciLM-7B
            self._set_vocab_llama_hf()

    def set_gguf_parameters(self):
        if "block_configs" in self.hparams:  # Llama-3_1-Nemotron-51B
            assert self.block_count == len(self._num_kv_heads)
            assert self.block_count == len(self._num_heads)
            assert self.block_count == len(self._ffn_dims)
            if (rope_theta := self.hparams.get("rope_theta")) is not None:
                self.gguf_writer.add_rope_freq_base(rope_theta)
            self.gguf_writer.add_head_count_kv(self._num_kv_heads)
            self.gguf_writer.add_head_count(self._num_heads)
            self.gguf_writer.add_feed_forward_length(self._ffn_dims)
            self.gguf_writer.add_block_count(self.block_count)
            self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"])
            self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
            self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
            self.gguf_writer.add_key_length(self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
            self.gguf_writer.add_value_length(self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
            self.gguf_writer.add_file_type(self.ftype)
        else:  # DeciLM-7B
            super().set_gguf_parameters()
            if "num_key_value_heads_per_layer" in self.hparams:  # DeciLM-7B
                self._num_kv_heads: list[int] = self.hparams["num_key_value_heads_per_layer"]
                assert self.block_count == len(self._num_kv_heads)
                self.gguf_writer.add_head_count_kv(self._num_kv_heads)
        hparams = self.hparams
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])

        if (rope_dim := hparams.get("head_dim")) is None:
            rope_dim = hparams["hidden_size"] // hparams["num_attention_heads"]
        self.gguf_writer.add_rope_dimension_count(rope_dim)

        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "linear" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])

    @staticmethod
    def permute(weights: Tensor, n_head: int, n_head_kv: int | None):
        if n_head_kv is not None and n_head != n_head_kv:
            n_head = n_head_kv
        return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
                .swapaxes(1, 2)
                .reshape(weights.shape))

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        n_head = self.hparams["num_attention_heads"]
        if bid is not None:
            if "num_key_value_heads_per_layer" in self.hparams:
                n_kv_head = self.hparams["num_key_value_heads_per_layer"][bid]
            elif "block_configs" in self.hparams:
                n_kv_head = self._num_kv_heads[bid]
                n_head = self._num_heads[bid]
            else:
                n_kv_head = self.hparams.get("num_key_value_heads")
        else:
            n_kv_head = self.hparams.get("num_key_value_heads")
        if name.endswith(("q_proj.weight", "q_proj.bias")):
            data_torch = DeciModel.permute(data_torch, n_head, n_head)
        if name.endswith(("k_proj.weight", "k_proj.bias")):
            data_torch = DeciModel.permute(data_torch, n_head, n_kv_head)
        return [(self.map_tensor_name(name), data_torch)]

    def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
        if rope_scaling := self.find_hparam(["rope_scaling"], optional=True):
            if rope_scaling.get("rope_type", '').lower() == "llama3":
                base = self.hparams.get("rope_theta", 10000.0)
                if (dim := self.hparams.get("head_dim")) is None:
                    dim = self.hparams["hidden_size"] // self.hparams["num_attention_heads"]
                freqs = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim))

                factor = rope_scaling.get("factor", 8.0)
                low_freq_factor = rope_scaling.get("low_freq_factor", 1.0)
                high_freq_factor = rope_scaling.get("high_freq_factor", 4.0)
                old_context_len = self.hparams.get("original_max_position_embeddings", 8192)

                low_freq_wavelen = old_context_len / low_freq_factor
                high_freq_wavelen = old_context_len / high_freq_factor
                assert low_freq_wavelen != high_freq_wavelen

                rope_factors = []
                for freq in freqs:
                    wavelen = 2 * math.pi / freq
                    if wavelen < high_freq_wavelen:
                        rope_factors.append(1)
                    elif wavelen > low_freq_wavelen:
                        rope_factors.append(factor)
                    else:
                        smooth = (old_context_len / wavelen - low_freq_factor) / (high_freq_factor - low_freq_factor)
                        rope_factors.append(1 / ((1 - smooth) / factor + smooth))

                yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FREQS), torch.tensor(rope_factors, dtype=torch.float32))

    def prepare_tensors(self):
        super().prepare_tensors()


@ModelBase.register("BitnetForCausalLM")
class BitnetModel(TextModel):
    model_arch = gguf.MODEL_ARCH.BITNET

    def set_vocab(self):
        self._set_vocab_sentencepiece()

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
        self.gguf_writer.add_rope_scaling_factor(1.0)
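    # BitNet b1.58 ternary quantization: the scale is the mean absolute value
    # of the weight, and each element is rounded to {-1, 0, +1} in units of
    # that scale, then rescaled so downstream quantization sees fp32 values.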
    def weight_quant(self, weight: Tensor) -> Tensor:
        dtype = weight.dtype
        weight = weight.float()
        scale = weight.abs().mean().clamp(min=1e-5)
        iscale = 1 / scale
        # TODO: multiply by the scale directly instead of inverting it twice
        # (this is also unnecessarily doubly inverted upstream)
        # ref: https://huggingface.co/1bitLLM/bitnet_b1_58-3B/blob/af89e318d78a70802061246bf037199d2fb97020/utils_quant.py#L10
        result = (weight * iscale).round().clamp(-1, 1) / iscale
        return result.type(dtype)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        new_name = self.map_tensor_name(name)

        if any(self.match_model_tensor_name(new_name, key, bid) for key in [
            gguf.MODEL_TENSOR.ATTN_Q,
            gguf.MODEL_TENSOR.ATTN_K,
            gguf.MODEL_TENSOR.ATTN_V,
            gguf.MODEL_TENSOR.ATTN_OUT,
            gguf.MODEL_TENSOR.FFN_UP,
            gguf.MODEL_TENSOR.FFN_DOWN,
            gguf.MODEL_TENSOR.FFN_GATE,
        ]):
            # transform weight into 1/0/-1 (in fp32)
            data_torch = self.weight_quant(data_torch)

        yield (new_name, data_torch)


@ModelBase.register("GrokForCausalLM", "Grok1ForCausalLM")
class GrokModel(TextModel):
    model_arch = gguf.MODEL_ARCH.GROK

    def set_vocab(self):
        if (self.dir_model / 'tokenizer.model').is_file():
            self._set_vocab_sentencepiece()
            return

        if not (self.dir_model / 'tokenizer.json').is_file() or not (self.dir_model / 'chat_template.jinja').is_file():
            logger.error('Error: Missing vocab and chat template, download files from https://huggingface.co/alvarobartt/grok-2-tokenizer')
            sys.exit(1)

        self._set_vocab_gpt2()

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def set_gguf_parameters(self):
        super().set_gguf_parameters()

        self.gguf_writer.add_attn_logit_softcapping(self.hparams.get("attn_logit_softcapping", 30.0))
        self.gguf_writer.add_router_logit_softcapping(self.hparams.get("router_logit_softcapping", 30.0))
        if (final_logit_softcap := self.hparams.get("final_logit_softcapping")):
            self.gguf_writer.add_final_logit_softcapping(final_logit_softcap)

        if (rope_dim := self.hparams.get("head_dim")) is None:
            rope_dim = self.hparams["hidden_size"] // self.hparams["num_attention_heads"]

        if (moe_intermediate_size := self.hparams.get("moe_intermediate_size")) is not None:
            self.gguf_writer.add_expert_feed_forward_length(moe_intermediate_size)

        # Treat "original" as "yarn"; it appears to have been a mistake
        if self.hparams.get("rope_type") in ("yarn", "original"):
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
            self.gguf_writer.add_rope_scaling_factor(self.hparams["scaling_factor"])
            self.gguf_writer.add_rope_scaling_orig_ctx_len(self.hparams["original_max_position_embeddings"])
            self.gguf_writer.add_rope_scaling_yarn_ext_factor(self.hparams["extrapolation_factor"])
            self.gguf_writer.add_rope_scaling_yarn_attn_factor(self.hparams["attn_factor"])
            self.gguf_writer.add_rope_scaling_yarn_beta_fast(self.hparams["beta_fast"])
            self.gguf_writer.add_rope_scaling_yarn_beta_slow(self.hparams["beta_slow"])

        if temp_len := self.hparams.get("attn_temperature_len"):
            self.gguf_writer.add_attn_temperature_length(temp_len)

        self.gguf_writer.add_attn_output_scale(self.hparams.get("attn_output_multiplier", rope_dim**-0.5))
        self.gguf_writer.add_embedding_scale(self.hparams["embedding_multiplier_scale"])
        self.gguf_writer.add_logit_scale(self.hparams["output_multiplier_scale"])

    _experts: list[dict[str, list[Tensor]]] | None = None
    _cur_expert = ""
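    # Grok expert tensors may arrive split across checkpoint shards, so each
    # name is buffered as a list of chunks. Once a non-expert tensor shows up
    # after an expert group, the buffered chunks are concatenated and the
    # per-expert matrices are stacked into one 3D tensor per projection.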
  2426. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  2427. tensors: list[tuple[str, Tensor]] = []
  2428. is_expert = ".moe." in name or ".block_sparse_moe.experts." in name
  2429. if not is_expert:
  2430. tensors.append((self.map_tensor_name(name), data_torch))
  2431. # process the experts separately
  2432. if is_expert or self._cur_expert:
  2433. n_experts = self.hparams["num_local_experts"]
  2434. assert bid is not None
  2435. if self._experts is None:
  2436. self._experts = [{} for _ in range(self.block_count)]
  2437. # concatenate split tensors
  2438. if name in self._experts[bid]:
  2439. self._cur_expert = name
  2440. self._experts[bid][name].append(data_torch)
  2441. return []
  2442. elif is_expert:
  2443. self._cur_expert = name
  2444. self._experts[bid][name] = [data_torch]
  2445. return []
  2446. else:
  2447. self._cur_expert = ""
  2448. for bid in range(self.block_count):
  2449. if len(self._experts[bid]) >= n_experts * 3:
  2450. # merge the experts into a single 3d tensor
  2451. for wid in [("linear", "w1", 0), ("linear_1", "w2", 1), ("linear_v", "w3", 0)]:
  2452. datas: list[Tensor] = []
  2453. for xid in range(n_experts):
  2454. ename = f"transformer.decoder_layer.{bid}.moe.{xid}.{wid[0]}.weight"
  2455. if ename not in self._experts[bid]:
  2456. ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.{wid[1]}.weight"
  2457. tensor_list = self._experts[bid][ename]
  2458. datas.append(torch.cat(tensor_list, dim=wid[2]) if len(tensor_list) > 1 else tensor_list[0])
  2459. del self._experts[bid][ename]
  2460. data_torch = torch.stack(datas, dim=0)
  2461. merged_name = f"transformer.decoder_layer.{bid}.moe.{wid[0]}.weight"
  2462. new_name = self.map_tensor_name(merged_name)
  2463. yield (new_name, data_torch)
  2464. yield from tensors
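
# --- illustrative sketch (not part of the converter above) ------------------
# Why: the expert merge above first torch.cat()s row/column-split shards of
# each expert weight, then torch.stack()s the per-expert results into one 3D
# tensor. The toy shapes below are assumptions chosen for readability.
def _demo_merge_split_experts():
    import torch  # torch is imported at module scope; repeated for self-containment

    n_experts, n_ff, n_embd = 2, 4, 3
    # each expert's w1 arrives as two shards split along dim 0 (rows)
    shards = [[torch.randn(n_ff // 2, n_embd) for _ in range(2)] for _ in range(n_experts)]
    per_expert = [torch.cat(s, dim=0) for s in shards]  # each: (n_ff, n_embd)
    merged = torch.stack(per_expert, dim=0)             # (n_experts, n_ff, n_embd)
    assert merged.shape == (n_experts, n_ff, n_embd)
    return merged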


@ModelBase.register("DbrxForCausalLM")
class DbrxModel(TextModel):
    model_arch = gguf.MODEL_ARCH.DBRX

    def set_gguf_parameters(self):
        ffn_config = self.hparams["ffn_config"]
        attn_config = self.hparams["attn_config"]
        self.gguf_writer.add_block_count(self.hparams["n_layers"])

        self.gguf_writer.add_context_length(self.hparams["max_seq_len"])
        self.gguf_writer.add_embedding_length(self.hparams["d_model"])
        self.gguf_writer.add_feed_forward_length(ffn_config["ffn_hidden_size"])

        self.gguf_writer.add_head_count(self.hparams["n_heads"])
        self.gguf_writer.add_head_count_kv(attn_config["kv_n_heads"])

        self.gguf_writer.add_rope_freq_base(attn_config["rope_theta"])

        self.gguf_writer.add_clamp_kqv(attn_config["clip_qkv"])

        self.gguf_writer.add_expert_count(ffn_config["moe_num_experts"])
        self.gguf_writer.add_expert_used_count(ffn_config["moe_top_k"])

        self.gguf_writer.add_layer_norm_eps(1e-5)

        self.gguf_writer.add_file_type(self.ftype)
        logger.info(f"gguf: file type = {self.ftype}")

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        n_expert = self.hparams["ffn_config"]["moe_num_experts"]
        n_ff = self.hparams["ffn_config"]["ffn_hidden_size"]
        n_embd = self.hparams["d_model"]

        # Specific behavior for experts tensors: suffix .weight, view as 3D and transpose
        # original implementation expects (n_expert, n_ff, n_embd) for all experts weights
        # But llama.cpp moe graph works differently
        # AND the dimensions in ggml are typically in the reverse order of the pytorch dimensions
        # so (n_expert, n_ff, n_embd) in pytorch is {n_embd, n_ff, n_expert} in ggml_tensor
        exp_tensor_names = {"ffn.experts.mlp.w1": None,       # LLM_TENSOR_FFN_GATE_EXPS ggml_tensor->ne{n_embd, n_ff,   n_expert}
                            "ffn.experts.mlp.w2": (0, 2, 1),  # LLM_TENSOR_FFN_DOWN_EXPS ggml_tensor->ne{n_ff,   n_embd, n_expert}
                            "ffn.experts.mlp.v1": None}       # LLM_TENSOR_FFN_UP_EXPS   ggml_tensor->ne{n_embd, n_ff,   n_expert}
        experts = False

        for exp_tensor_name in exp_tensor_names.keys():
            if name.find(exp_tensor_name) != -1 and name.find(".weight") == -1:
                experts = True
                data_torch = data_torch.view(n_expert, n_ff, n_embd)
                if (permute_tensor := exp_tensor_names[exp_tensor_name]) is not None:
                    data_torch = data_torch.permute(*permute_tensor)
                break

        # map tensor names
        # In MoE models the ffn tensors are typically most of the model weights,
        # and need to be quantizable. Quantize expects tensor names to be suffixed by .weight.
        # Every other model has weight names ending in .weight, so assume that
        # convention here; dbrx is the exception:
        # https://huggingface.co/databricks/dbrx-instruct/blob/main/model.safetensors.index.json#L15
        new_name = self.map_tensor_name(name if not experts else name + ".weight", try_suffixes=(".weight",))

        return [(new_name, data_torch)]

    def tensor_force_quant(self, name: str, new_name: str, bid: int | None, n_dims: int) -> gguf.GGMLQuantizationType | bool:
        del name, new_name, bid  # unused

        return n_dims > 1
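
# --- illustrative sketch (not part of DbrxModel) ----------------------------
# Why: dbrx stores each expert matrix flattened into one 2D tensor; the code
# above view()s it to (n_expert, n_ff, n_embd) and permute()s w2 so ggml sees
# the expected dimension order. Toy sizes below are assumptions.
def _demo_dbrx_expert_view():
    import torch

    n_expert, n_ff, n_embd = 2, 4, 3
    flat = torch.arange(n_expert * n_ff * n_embd, dtype=torch.float32)
    w1 = flat.view(n_expert, n_ff, n_embd)                   # gate/up: no permute
    w2 = flat.view(n_expert, n_ff, n_embd).permute(0, 2, 1)  # down: swap last two dims
    assert w2.shape == (n_expert, n_embd, n_ff)
    return w1, w2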


@ModelBase.register("MiniCPMForCausalLM")
class MiniCPMModel(TextModel):
    model_arch = gguf.MODEL_ARCH.MINICPM

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        embedding_scale = float(self.hparams["scale_emb"])
        self.gguf_writer.add_embedding_scale(embedding_scale)
        logger.info(f"gguf: (minicpm) embedding_scale = {embedding_scale}")
        residual_scale = self.hparams["scale_depth"] / self.hparams["num_hidden_layers"] ** 0.5
        self.gguf_writer.add_residual_scale(residual_scale)
        logger.info(f"gguf: (minicpm) residual_scale = {residual_scale}")
        logit_scale = self.hparams["hidden_size"] / self.hparams["dim_model_base"]
        self.gguf_writer.add_logit_scale(logit_scale)
        logger.info(f"gguf: (minicpm) logit_scale = {logit_scale}")
        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "longrope":
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LONGROPE)
            logger.info(f"gguf: (minicpm) rope_scaling_type = {gguf.RopeScalingType.LONGROPE}")

    def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
        rope_dims = self.hparams["hidden_size"] // self.hparams["num_attention_heads"]

        rope_scaling = self.find_hparam(['rope_scaling'], True)
        if rope_scaling is not None:
            long_factors = rope_scaling.get('long_factor', None)
            short_factors = rope_scaling.get('short_factor', None)

            if long_factors is None or short_factors is None:
                raise KeyError('Missing the required key rope_scaling.long_factor or rope_scaling.short_factor')

            if len(long_factors) != len(short_factors) or len(long_factors) != rope_dims / 2:
                raise ValueError(f'The length of rope long and short factors must be {rope_dims / 2}')

            yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FACTORS_LONG), torch.tensor(long_factors, dtype=torch.float32))
            yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FACTORS_SHORT), torch.tensor(short_factors, dtype=torch.float32))

    def set_vocab(self):
        self._set_vocab_sentencepiece()

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        n_head = self.hparams["num_attention_heads"]
        n_kv_head = self.hparams.get("num_key_value_heads")

        # HF models permute some of the tensors, so we need to undo that
        if name.endswith("q_proj.weight"):
            data_torch = LlamaModel.permute(data_torch, n_head, n_head)
        if name.endswith("k_proj.weight"):
            data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)

        return [(self.map_tensor_name(name), data_torch)]


@ModelBase.register("MiniCPM3ForCausalLM")
class MiniCPM3Model(TextModel):
    model_arch = gguf.MODEL_ARCH.MINICPM3

    def set_gguf_parameters(self):
        hparams = self.hparams

        self.gguf_writer.add_file_type(self.ftype)
        self.gguf_writer.add_context_length(hparams["max_position_embeddings"])
        self.gguf_writer.add_embedding_length(hparams["hidden_size"])
        self.gguf_writer.add_block_count(self.block_count)
        self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
        self.gguf_writer.add_head_count(hparams["num_attention_heads"])
        self.gguf_writer.add_head_count_kv(hparams["num_key_value_heads"])
        self.gguf_writer.add_layer_norm_rms_eps(hparams["rms_norm_eps"])
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])
        if "q_lora_rank" in hparams and hparams["q_lora_rank"] is not None:
            self.gguf_writer.add_q_lora_rank(hparams["q_lora_rank"])
        self.gguf_writer.add_kv_lora_rank(hparams["kv_lora_rank"])
        self.gguf_writer.add_key_length(hparams["qk_nope_head_dim"] + hparams["qk_rope_head_dim"])
        self.gguf_writer.add_rope_dimension_count(hparams["qk_rope_head_dim"])

    def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
        rope_scaling = self.find_hparam(['rope_scaling'], True)
        if rope_scaling is not None:
            rope_dims = self.hparams["qk_rope_head_dim"]

            long_factors = rope_scaling.get('long_factor', None)
            short_factors = rope_scaling.get('short_factor', None)

            if long_factors is None or short_factors is None:
                raise KeyError('Missing the required key rope_scaling.long_factor or rope_scaling.short_factor')

            if len(long_factors) != len(short_factors) or len(long_factors) != rope_dims / 2:
                raise ValueError(f'The length of rope long and short factors must be {rope_dims / 2}')

            yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FACTORS_LONG), torch.tensor(long_factors, dtype=torch.float32))
            yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FACTORS_SHORT), torch.tensor(short_factors, dtype=torch.float32))

    def set_vocab(self):
        self._set_vocab_sentencepiece()

    def _reverse_hf_permute(self, weights: Tensor, n_head: int, n_kv_head: int | None = None) -> Tensor:
        if n_kv_head is not None and n_head != n_kv_head:
            n_head //= n_kv_head

        return (
            weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
            .swapaxes(1, 2)
            .reshape(weights.shape)
        )
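
# --- illustrative sketch (not part of MiniCPM3Model) ------------------------
# Why: show what the reshape/swapaxes in the permute helpers above does to
# Q/K rows. With one head and four rows, HF's interleaved rope layout
# [r0, r1, r2, r3] becomes the [r0, r2, r1, r3] ordering llama.cpp expects.
# Shapes are toy-sized assumptions.
def _demo_undo_hf_permute():
    import torch

    weights = torch.arange(4, dtype=torch.float32).reshape(4, 1)  # 4 rows, 1 col
    n_head = 1
    out = (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
           .swapaxes(1, 2)
           .reshape(weights.shape))
    assert out.flatten().tolist() == [0.0, 2.0, 1.0, 3.0]
    return out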


@ModelBase.register("QWenLMHeadModel")
class QwenModel(TextModel):
    model_arch = gguf.MODEL_ARCH.QWEN

    @staticmethod
    def token_bytes_to_string(b):
        from transformers.models.gpt2.tokenization_gpt2 import bytes_to_unicode
        byte_encoder = bytes_to_unicode()
        return ''.join([byte_encoder[ord(char)] for char in b.decode('latin-1')])

    @staticmethod
    def bpe(mergeable_ranks: dict[bytes, int], token: bytes, max_rank: int | None = None) -> list[bytes]:
        parts = [bytes([b]) for b in token]
        while True:
            min_idx = None
            min_rank = None
            for i, pair in enumerate(zip(parts[:-1], parts[1:])):
                rank = mergeable_ranks.get(pair[0] + pair[1])
                if rank is not None and (min_rank is None or rank < min_rank):
                    min_idx = i
                    min_rank = rank
            if min_rank is None or (max_rank is not None and min_rank >= max_rank):
                break
            assert min_idx is not None
            parts = parts[:min_idx] + [parts[min_idx] + parts[min_idx + 1]] + parts[min_idx + 2:]
        return parts

    def set_vocab(self):
        self._set_vocab_qwen()

    def set_gguf_parameters(self):
        self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"])
        self.gguf_writer.add_block_count(self.hparams["num_hidden_layers"])
        self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
        self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
        self.gguf_writer.add_rope_freq_base(self.hparams["rotary_emb_base"])
        self.gguf_writer.add_rope_dimension_count(self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
        self.gguf_writer.add_head_count(self.hparams["num_attention_heads"])
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)
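
# --- illustrative sketch (not part of QwenModel) ----------------------------
# Why: show QwenModel.bpe() greedily applying the lowest-ranked merge first.
# The toy merge table below is an assumption, not Qwen's real tiktoken ranks.
def _demo_qwen_bpe():
    ranks = {b'ab': 0, b'abc': 1}
    # 'a'+'b' merges first (rank 0), then 'ab'+'c' (rank 1)
    assert QwenModel.bpe(ranks, b'abc') == [b'abc']
    # capping max_rank stops merging before 'abc' is formed
    assert QwenModel.bpe(ranks, b'abc', max_rank=1) == [b'ab', b'c']
    return True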


@ModelBase.register("Qwen2Model", "Qwen2ForCausalLM", "Qwen2AudioForConditionalGeneration")
class Qwen2Model(TextModel):
    model_arch = gguf.MODEL_ARCH.QWEN2

    def set_vocab(self):
        try:
            self._set_vocab_sentencepiece()
        except FileNotFoundError:
            self._set_vocab_gpt2()

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self._try_set_pooling_type()
        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
            self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        if self.hf_arch == "Qwen2Model":
            name = f"model.{name}"  # map to Qwen2ForCausalLM tensors
        if "language_model." in name:
            name = name.replace("language_model.", "")  # for InternVL
        if name.startswith("mlp") or name.startswith("multi_modal_projector") \
                or name.startswith("vision_model") or name.startswith("audio_tower") \
                or name.startswith("model.vision_tower") or name.startswith("model.multi_modal_projector"):
            # skip vision and audio tensors
            return []
        yield from super().modify_tensors(data_torch, name, bid)


@ModelBase.register("DreamModel")
class DreamModel(TextModel):
    model_arch = gguf.MODEL_ARCH.DREAM

    def get_vocab_base(self) -> tuple[list[str], list[int], str]:
        tokens: list[str] = []
        toktypes: list[int] = []

        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(self.dir_model, trust_remote_code=True)

        vocab_dict = tokenizer.get_vocab()
        vocab_size = self.hparams.get("vocab_size", len(vocab_dict))
        assert max(vocab_dict.values()) < vocab_size

        tokpre = self.get_vocab_base_pre(tokenizer)

        reverse_vocab = {id_: encoded_tok for encoded_tok, id_ in vocab_dict.items()}
        added_vocab = tokenizer.get_added_vocab()

        for i in range(vocab_size):
            if i not in reverse_vocab:
                tokens.append(f"[PAD{i}]")
                toktypes.append(gguf.TokenType.UNUSED)
            elif reverse_vocab[i] in added_vocab:
                tokens.append(reverse_vocab[i])
                # Check if it's a special token - treat special tokens as CONTROL tokens
                if hasattr(tokenizer, 'added_tokens_decoder') and i in tokenizer.added_tokens_decoder:
                    if tokenizer.added_tokens_decoder[i].special:
                        toktypes.append(gguf.TokenType.CONTROL)
                    else:
                        toktypes.append(gguf.TokenType.USER_DEFINED)
                else:
                    # Fallback: treat all added vocab as control tokens for special tokens like <|im_start|>
                    toktypes.append(gguf.TokenType.CONTROL)
            else:
                tokens.append(reverse_vocab[i])
                toktypes.append(gguf.TokenType.NORMAL)

        return tokens, toktypes, tokpre

    def set_vocab(self):
        try:
            self._set_vocab_sentencepiece()
        except FileNotFoundError:
            self._set_vocab_gpt2()

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self._try_set_pooling_type()

        # Dream models use non-causal attention for diffusion
        self.gguf_writer.add_causal_attention(False)

        # Handle RoPE scaling similar to Qwen2
        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
            self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"])

        # Add Dream-specific parameters
        mask_token_id = self.hparams.get("mask_token_id")
        if mask_token_id is not None:
            self.gguf_writer.add_mask_token_id(mask_token_id)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # Dream model tensors should be mapped directly since it's the base model
        yield from super().modify_tensors(data_torch, name, bid)


@ModelBase.register("LLaDAModelLM")
class LLaDAModel(TextModel):
    model_arch = gguf.MODEL_ARCH.LLADA
    undo_permute = True

    def get_vocab_base(self) -> tuple[list[str], list[int], str]:
        tokens: list[str] = []
        toktypes: list[int] = []

        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(self.dir_model, trust_remote_code=True)

        vocab_dict = tokenizer.get_vocab()
        vocab_size = self.hparams.get("vocab_size", len(vocab_dict))
        assert max(vocab_dict.values()) < vocab_size

        tokpre = self.get_vocab_base_pre(tokenizer)

        reverse_vocab = {id_: encoded_tok for encoded_tok, id_ in vocab_dict.items()}
        added_vocab = tokenizer.get_added_vocab()

        for i in range(vocab_size):
            if i not in reverse_vocab:
                tokens.append(f"[PAD{i}]")
                toktypes.append(gguf.TokenType.UNUSED)
            elif reverse_vocab[i] in added_vocab:
                tokens.append(reverse_vocab[i])
                # Check if it's a special token - treat special tokens as CONTROL tokens
                if hasattr(tokenizer, 'added_tokens_decoder') and i in tokenizer.added_tokens_decoder:
                    if tokenizer.added_tokens_decoder[i].special:
                        toktypes.append(gguf.TokenType.CONTROL)
                    else:
                        toktypes.append(gguf.TokenType.USER_DEFINED)
                else:
                    # Fallback: treat all added vocab as control tokens for special tokens like <|im_start|>
                    toktypes.append(gguf.TokenType.CONTROL)
            else:
                tokens.append(reverse_vocab[i])
                toktypes.append(gguf.TokenType.NORMAL)

        return tokens, toktypes, tokpre

    def set_vocab(self):
        self._set_vocab_gpt2()

        # LLaDA specific parameters
        self.gguf_writer.add_add_bos_token(True)

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self._try_set_pooling_type()

        # Add parameters similar to LlamaModel
        hparams = self.hparams
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])

        if (rope_dim := hparams.get("head_dim")) is None:
            n_heads = hparams.get("num_attention_heads", hparams.get("n_heads"))
            rope_dim = hparams.get("hidden_size", hparams.get("d_model")) // n_heads
        self.gguf_writer.add_rope_dimension_count(rope_dim)

        # Set context length for LLaDA
        context_length = self.hparams.get("max_sequence_length", 4096)
        self.gguf_writer.add_context_length(context_length)

        # Set embedding length (dimension size)
        embedding_length = self.hparams.get("d_model", 4096)
        self.gguf_writer.add_embedding_length(embedding_length)

        # Set feed forward length (MLP hidden size)
        feed_forward_length = self.hparams.get("mlp_hidden_size", 12288)
        self.gguf_writer.add_feed_forward_length(feed_forward_length)

        # LLaDA models use non-causal attention for diffusion, similar to Dream
        self.gguf_writer.add_causal_attention(False)

        # LLaDA models don't shift their logits
        self.gguf_writer.add_diffusion_shift_logits(False)

    @staticmethod
    def permute(weights: Tensor, n_head: int, n_head_kv: int | None):
        if n_head_kv is not None and n_head != n_head_kv:
            n_head = n_head_kv
        return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
                .swapaxes(1, 2)
                .reshape(weights.shape))

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        n_head = self.hparams.get("num_attention_heads", self.hparams.get("n_heads"))
        n_kv_head = self.hparams.get("num_key_value_heads", self.hparams.get("n_kv_heads"))

        if self.undo_permute:
            if name.endswith(("q_proj.weight", "q_proj.bias")):
                data_torch = LLaDAModel.permute(data_torch, n_head, n_head)
            if name.endswith(("k_proj.weight", "k_proj.bias")):
                data_torch = LLaDAModel.permute(data_torch, n_head, n_kv_head)

        # LLaDA model tensors should be mapped directly since it's the base model
        yield from super().modify_tensors(data_torch, name, bid)


@ModelBase.register("Ernie4_5_ForCausalLM", "Ernie4_5ForCausalLM")
class Ernie4_5Model(TextModel):
    model_arch = gguf.MODEL_ARCH.ERNIE4_5

    def set_vocab(self):
        self._set_vocab_sentencepiece()

    def set_gguf_parameters(self):
        super().set_gguf_parameters()

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        num_heads = self.hparams["num_attention_heads"]
        num_kv_heads = self.hparams["num_key_value_heads"]
        if (head_dim := self.hparams.get("head_dim")) is None:
            head_dim = self.hparams["hidden_size"] // num_heads

        if "ernie." in name:
            name = name.replace("ernie.", "model.")
        # split the qkv weights
        # qkv_proj shape: [(num_heads + 2 * num_kv_heads) * head_dim, hidden_size]
        if "qkv_proj" in name:
            name_q = name.replace("qkv_proj.weight", "q_proj.weight")
            name_k = name.replace("qkv_proj.weight", "k_proj.weight")
            name_v = name.replace("qkv_proj.weight", "v_proj.weight")
            total_q_dim = num_heads * head_dim
            total_k_dim = num_kv_heads * head_dim
            total_v_dim = num_kv_heads * head_dim
            q_proj_weight, k_proj_weight, v_proj_weight = data_torch.split([total_q_dim, total_k_dim, total_v_dim], dim=0)
            return [
                (self.map_tensor_name(name_q), q_proj_weight),
                (self.map_tensor_name(name_k), k_proj_weight),
                (self.map_tensor_name(name_v), v_proj_weight)
            ]
        # split the up_gate_proj into gate and up
        # up_gate_proj shape: [2 * intermediate_size, hidden_size]
        if "up_gate_proj" in name:
            name_up = name.replace("up_gate_proj.weight", "up_proj.weight")
            name_gate = name.replace("up_gate_proj.weight", "gate_proj.weight")
            dim_half = data_torch.shape[0] // 2
            gate_proj_weight, up_proj_weight = data_torch.split(dim_half, dim=0)
            return [
                (self.map_tensor_name(name_gate), gate_proj_weight),
                (self.map_tensor_name(name_up), up_proj_weight)
            ]
        return [(self.map_tensor_name(name), data_torch)]
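
# --- illustrative sketch (not part of Ernie4_5Model) ------------------------
# Why: show the torch.split() used above to cut a fused qkv_proj weight into
# q/k/v along dim 0. Head counts and dims are toy assumptions.
def _demo_split_qkv():
    import torch

    num_heads, num_kv_heads, head_dim, hidden = 4, 2, 3, 5
    qkv = torch.randn((num_heads + 2 * num_kv_heads) * head_dim, hidden)
    q, k, v = qkv.split([num_heads * head_dim, num_kv_heads * head_dim, num_kv_heads * head_dim], dim=0)
    assert q.shape[0] == 12 and k.shape[0] == 6 and v.shape[0] == 6
    return q, k, v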


@ModelBase.register("Ernie4_5_MoeForCausalLM")
class Ernie4_5MoeModel(Ernie4_5Model):
    model_arch = gguf.MODEL_ARCH.ERNIE4_5_MOE
    _experts: list[dict[str, Tensor]] | None = None

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._experts = [{} for _ in range(self.block_count)]

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_expert_count(self.hparams["moe_num_experts"])
        self.gguf_writer.add_expert_used_count(self.hparams["moe_k"])
        self.gguf_writer.add_interleave_moe_layer_step(self.hparams["moe_layer_interval"])
        self.gguf_writer.add_leading_dense_block_count(self.hparams["moe_layer_start_index"])
        if (moe_intermediate_size := self.hparams.get("moe_intermediate_size")) is not None:
            self.gguf_writer.add_expert_feed_forward_length(moe_intermediate_size)
        if (shared_expert_count := self.hparams.get('moe_num_shared_experts')) is not None:
            self.gguf_writer.add_expert_shared_count(shared_expert_count)
            if shared_expert_count > 0 and (shared_expert_intermediate_size := self.hparams.get('intermediate_size')) is not None and (num_key_value_heads := self.hparams.get('num_key_value_heads')) is not None:
                self.gguf_writer.add_expert_shared_feed_forward_length(shared_expert_intermediate_size // num_key_value_heads)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # Modify correction bias name as in DeepseekV2
        if name.endswith("e_score_correction_bias"):
            name = name.replace("e_score_correction_bias", "e_score_correction.bias")

        # skip Multi-Token Prediction (MTP) layers (again, same as DeepseekV2)
        match = re.match(r"model.mtp_block.(\d+)", name)
        if match:
            return []

        # skip all other MTP tensors for now
        match = re.match(r"model.mtp_emb_norm.(\d+)", name)
        if match:
            return []

        match = re.match(r"model.mtp_hidden_norm.(\d+)", name)
        if match:
            return []

        match = re.match(r"model.mtp_linear_proj.(\d+)", name)
        if match:
            return []

        # process the experts separately
        if name.find("mlp.experts") != -1:
            n_experts = self.hparams["moe_num_experts"]
            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for w_name in ["gate_proj", "up_proj", "down_proj"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename_to_retrieve = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
                        datas.append(self._experts[bid][ename_to_retrieve])
                        del self._experts[bid][ename_to_retrieve]

                    data_torch = torch.stack(datas, dim=0)
                    merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"
                    new_name = self.map_tensor_name(merged_name)
                    tensors.append((new_name, data_torch))

                return tensors
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()
        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")


@ModelBase.register(
    "Qwen2VLModel",
    "Qwen2VLForConditionalGeneration",
    "Qwen2_5_VLForConditionalGeneration",
    "Qwen2_5OmniModel",
)
class Qwen2VLModel(TextModel):
    model_arch = gguf.MODEL_ARCH.QWEN2VL

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        mrope_section = self.hparams["rope_scaling"]["mrope_section"]
        mrope_section += [0] * max(0, 4 - len(mrope_section))
        self.gguf_writer.add_rope_dimension_sections(mrope_section)

    def set_vocab(self):
        try:
            self._set_vocab_sentencepiece()
        except FileNotFoundError:
            self._set_vocab_gpt2()

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused
        if name.startswith("thinker."):
            name = name.replace("thinker.", "")
        if name.startswith("visual") or name.startswith("audio") or \
                name.startswith("talker") or name.startswith("token2wav"):
            # skip multimodal tensors
            return []
        return [(self.map_tensor_name(name), data_torch)]
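
# --- illustrative sketch (not part of Qwen2VLModel) -------------------------
# Why: show the mrope_section zero-padding above, which extends the section
# list to exactly four entries. The input values are made up.
def _demo_pad_mrope_section():
    mrope_section = [16, 24, 24]
    mrope_section += [0] * max(0, 4 - len(mrope_section))
    assert mrope_section == [16, 24, 24, 0]
    return mrope_section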


@ModelBase.register("Qwen2VLModel", "Qwen2VLForConditionalGeneration", "Qwen2_5_VLForConditionalGeneration")
class Qwen2VLVisionModel(MmprojModel):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        assert self.hparams_vision is not None
        self.hparams_vision["image_size"] = self.hparams_vision.get("image_size", 560)
        # rename config.json values
        self.hparams_vision["num_attention_heads"] = self.hparams_vision.get("num_heads")
        self.hparams_vision["num_hidden_layers"] = self.hparams_vision.get("depth")
        if "embed_dim" in self.hparams_vision:  # qwen2vl
            self.hparams_vision["intermediate_size"] = self.hparams_vision.get("hidden_size")
            self.hparams_vision["hidden_size"] = self.hparams_vision.get("embed_dim")

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        assert self.hparams_vision is not None
        hparams = self.hparams_vision
        model_type = self.global_config['model_type']
        if model_type == 'qwen2_vl':
            self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.QWEN2VL)
        elif model_type == 'qwen2_5_vl' or model_type == 'qwen2_5_omni':
            if model_type == 'qwen2_5_omni':
                self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.QWEN25O)
            else:
                self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.QWEN25VL)
            self.gguf_writer.add_vision_use_silu(True)
            # find n_wa_pattern (window attention pattern)
            fullatt_block_indexes = hparams.get("fullatt_block_indexes")
            assert fullatt_block_indexes is not None, "fullatt_block_indexes is required for qwen2_5_vl"
            n_wa_pattern = fullatt_block_indexes[0] + 1
            # validate n_wa_pattern
            for i in range(1, len(fullatt_block_indexes)):
                if fullatt_block_indexes[i] - fullatt_block_indexes[i - 1] != n_wa_pattern:
                    raise ValueError(f"Invalid fullatt_block_indexes: {fullatt_block_indexes}")
            self.gguf_writer.add_vision_n_wa_pattern(n_wa_pattern)
        else:
            raise ValueError(f"Unknown QwenVL model type: {self.global_config['model_type']}")
        # default values below are taken from HF transformers code
        self.gguf_writer.add_vision_attention_layernorm_eps(self.global_config.get("rms_norm_eps", 1e-6))

    def tensor_force_quant(self, name, new_name, bid, n_dims):
        if ".position_embd." in new_name:
            return gguf.GGMLQuantizationType.F32
        return super().tensor_force_quant(name, new_name, bid, n_dims)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused
        if name.startswith("visual."):
            # process visual tensors
            # split QKV tensors if needed
            if ".qkv." in name:
                if data_torch.ndim == 2:  # weight
                    c3, _ = data_torch.shape
                else:  # bias
                    c3 = data_torch.shape[0]
                assert c3 % 3 == 0
                c = c3 // 3
                wq = data_torch[:c]
                wk = data_torch[c: c * 2]
                wv = data_torch[c * 2:]
                return [
                    (self.map_tensor_name(name.replace("qkv", "q")), wq),
                    (self.map_tensor_name(name.replace("qkv", "k")), wk),
                    (self.map_tensor_name(name.replace("qkv", "v")), wv),
                ]
            elif 'patch_embed.proj.weight' in name:
                # split Conv3D into Conv2Ds
                c1, c2, kt, kh, kw = data_torch.shape
                del c1, c2, kh, kw  # unused
                assert kt == 2, "Current implementation only supports temporal_patch_size of 2"
                return [
                    (gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.V_ENC_EMBD_PATCH] + ".weight", data_torch[:, :, 0, ...]),
                    (gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.V_ENC_EMBD_PATCH] + ".weight.1", data_torch[:, :, 1, ...]),
                ]
            else:
                return [(self.map_tensor_name(name), data_torch)]
        return []  # skip other tensors
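
# --- illustrative sketch (not part of Qwen2VLVisionModel) -------------------
# Why: show the Conv3D-to-Conv2D split above, which slices the temporal axis
# (kt == 2) of the patch-embedding kernel into two 2D kernels. Toy shapes.
def _demo_split_conv3d_patch_embed():
    import torch

    c_out, c_in, kt, kh, kw = 8, 3, 2, 14, 14
    w = torch.randn(c_out, c_in, kt, kh, kw)
    frame0 = w[:, :, 0, ...]  # first temporal slice  -> (c_out, c_in, kh, kw)
    frame1 = w[:, :, 1, ...]  # second temporal slice -> (c_out, c_in, kh, kw)
    assert frame0.shape == frame1.shape == (c_out, c_in, kh, kw)
    return frame0, frame1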


@ModelBase.register("Qwen2_5OmniModel")
class Qwen25OmniModel(Qwen2VLVisionModel):
    has_vision_encoder = True
    has_audio_encoder = True

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        assert self.hparams_audio is not None
        self.hparams_audio["hidden_size"] = self.hparams_audio["d_model"]
        self.hparams_audio["intermediate_size"] = self.hparams_audio["encoder_ffn_dim"]
        self.hparams_audio["num_attention_heads"] = self.hparams_audio["encoder_attention_heads"]

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        assert self.hparams_audio is not None
        self.gguf_writer.add_audio_num_mel_bins(self.hparams_audio["num_mel_bins"])
        self.gguf_writer.add_audio_attention_layernorm_eps(self.hparams_audio.get("layer_norm_eps", 1e-5))

    def get_vision_config(self) -> dict[str, Any] | None:
        return self.global_config["thinker_config"].get("vision_config")

    def get_audio_config(self) -> dict[str, Any] | None:
        return self.global_config["thinker_config"].get("audio_config")

    def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
        # SinusoidsPositionEmbedding
        assert self.hparams_audio is not None
        max_timescale = 10000
        length = 1500
        channels = self.hparams_audio["hidden_size"]
        log_timescale_increment = np.log(max_timescale) / (channels // 2 - 1)
        inv_timescales = torch.exp(-log_timescale_increment * torch.arange(channels // 2).float())
        scaled_time = torch.arange(length)[:, np.newaxis] * inv_timescales[np.newaxis, :]
        pos_embd = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=1).to(dtype=torch.float32)
        yield ("audio_tower.embed_positions.weight", pos_embd)

    def tensor_force_quant(self, name, new_name, bid, n_dims):
        if ".conv" in name and ".weight" in name:
            return gguf.GGMLQuantizationType.F16
        return super().tensor_force_quant(name, new_name, bid, n_dims)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        if name.startswith("thinker."):
            name = name.replace("thinker.", "")

        if name.startswith("audio_tower"):
            # process audio tensors
            if "conv1.bias" in name or "conv2.bias" in name:
                # transpose conv1 and conv2 bias
                data_torch = data_torch.unsqueeze(-1)
            if "audio_bos_eos_token" in name:
                # this tensor is left unused in transformers code
                # https://github.com/huggingface/transformers/blob/6e3063422c4b1c014aa60c32b9254fd2902f0f28/src/transformers/models/qwen2_5_omni/modular_qwen2_5_omni.py#L1809
                return []
            return [(self.map_tensor_name(name), data_torch)]

        return super().modify_tensors(data_torch, name, bid)
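
# --- illustrative sketch (not part of Qwen25OmniModel) ----------------------
# Why: generate_extra_tensors() above builds Whisper-style sinusoidal position
# embeddings: sin/cos of position times geometrically spaced timescales. The
# toy length/channels below are assumptions.
def _demo_sinusoid_pos_embd():
    import numpy as np
    import torch

    length, channels, max_timescale = 6, 8, 10000
    log_inc = np.log(max_timescale) / (channels // 2 - 1)
    inv_timescales = torch.exp(-log_inc * torch.arange(channels // 2).float())
    scaled_time = torch.arange(length)[:, np.newaxis] * inv_timescales[np.newaxis, :]
    pos = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=1)
    assert pos.shape == (length, channels)
    return pos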


@ModelBase.register("InternVisionModel")
class InternVisionModel(MmprojModel):
    def set_gguf_parameters(self):
        assert self.hparams_vision is not None
        if isinstance(self.hparams_vision['image_size'], list):
            self.hparams_vision['image_size'] = self.hparams_vision['image_size'][0]
        if isinstance(self.hparams_vision['patch_size'], list):
            self.hparams_vision['patch_size'] = self.hparams_vision['patch_size'][0]
        super().set_gguf_parameters()
        hparams = self.hparams
        self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.INTERNVL)
        self.gguf_writer.add_vision_attention_layernorm_eps(hparams["layer_norm_eps"])
        # hidden_act
        if hparams["hidden_act"] == "silu":
            self.gguf_writer.add_vision_use_silu(True)
        elif hparams["hidden_act"] == "gelu":
            self.gguf_writer.add_vision_use_gelu(True)
        else:
            raise ValueError(f"Unsupported hidden_act: {hparams['hidden_act']}")
        # downsample_ratio
        downsample_ratio = self.global_config.get("downsample_ratio")
        assert downsample_ratio is not None
        self.gguf_writer.add_vision_projector_scale_factor(int(1.0 / downsample_ratio))

    def tensor_force_quant(self, name, new_name, bid, n_dims):
        if ".position_embd." in new_name:
            return gguf.GGMLQuantizationType.F32
        return super().tensor_force_quant(name, new_name, bid, n_dims)

    def _mapping_interns1_name(self, name):
        names_map = {
            "model.multi_modal_projector.layer_norm.bias": "mlp1.0.bias",
            "model.multi_modal_projector.layer_norm.weight": "mlp1.0.weight",
            "model.multi_modal_projector.linear_1.bias": "mlp1.1.bias",
            "model.multi_modal_projector.linear_1.weight": "mlp1.1.weight",
            "model.multi_modal_projector.linear_2.bias": "mlp1.3.bias",
            "model.multi_modal_projector.linear_2.weight": "mlp1.3.weight",
        }
        if name in names_map:
            name = names_map[name]
        return name

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused
        vision_prefix = ['vision_model', 'mlp', 'model.vision_tower', 'model.multi_modal_projector']
        # deal with intern-s1 special case
        name = self._mapping_interns1_name(name)
        if any(name.startswith(prefix) for prefix in vision_prefix):
            # process visual tensors
            # correct name
            if name.startswith("vision_model"):
                name = "vision_tower." + name
            if (".ls" in name or ".lambda_" in name or "position_embedding" in name) and not name.endswith(".weight"):
                name += ".weight"
            # split QKV tensors if needed
            if ".qkv." in name:
                if data_torch.ndim == 2:  # weight
                    c3, _ = data_torch.shape
                else:  # bias
                    c3 = data_torch.shape[0]
                assert c3 % 3 == 0
                c = c3 // 3
                wq = data_torch[:c]
                wk = data_torch[c: c * 2]
                wv = data_torch[c * 2:]
                return [
                    (self.map_tensor_name(name.replace("attn.qkv", "self_attn.q_proj")), wq),
                    (self.map_tensor_name(name.replace("attn.qkv", "self_attn.k_proj")), wk),
                    (self.map_tensor_name(name.replace("attn.qkv", "self_attn.v_proj")), wv),
                ]
            return [(self.map_tensor_name(name), data_torch)]
        return []  # skip other tensors


@ModelBase.register("WavTokenizerDec")
class WavTokenizerDecModel(TextModel):
    model_arch = gguf.MODEL_ARCH.WAVTOKENIZER_DEC

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        if \
                name.endswith("codebook.cluster_size") or \
                name.endswith("codebook.embed_avg") or \
                name.endswith("codebook.inited"):
            logger.debug(f"Skipping {name!r}")
            return []

        logger.info(f"{self.map_tensor_name(name)} -> {data_torch.shape}")

        return [(self.map_tensor_name(name), data_torch)]

    def set_vocab(self):
        self._set_vocab_none()

    def set_gguf_parameters(self):
        super().set_gguf_parameters()

        self.gguf_writer.add_vocab_size         (self.hparams["vocab_size"])
        self.gguf_writer.add_features_length    (self.hparams["n_embd_features"])
        self.gguf_writer.add_feed_forward_length(self.hparams["n_ff"])
        self.gguf_writer.add_group_norm_eps     (self.hparams["group_norm_epsilon"])
        self.gguf_writer.add_group_norm_groups  (self.hparams["group_norm_groups"])

        self.gguf_writer.add_posnet_embedding_length(self.hparams["posnet"]["n_embd"])
        self.gguf_writer.add_posnet_block_count     (self.hparams["posnet"]["n_layer"])

        self.gguf_writer.add_convnext_embedding_length(self.hparams["convnext"]["n_embd"])
        self.gguf_writer.add_convnext_block_count     (self.hparams["convnext"]["n_layer"])

        self.gguf_writer.add_causal_attention(False)


@ModelBase.register("Qwen2MoeForCausalLM")
class Qwen2MoeModel(TextModel):
    model_arch = gguf.MODEL_ARCH.QWEN2MOE

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        if (n_experts := self.hparams.get("num_experts")) is not None:
            self.gguf_writer.add_expert_count(n_experts)
        if (moe_intermediate_size := self.hparams.get("moe_intermediate_size")) is not None:
            self.gguf_writer.add_expert_feed_forward_length(moe_intermediate_size)
            logger.info(f"gguf: expert feed forward length = {moe_intermediate_size}")
        if (shared_expert_intermediate_size := self.hparams.get('shared_expert_intermediate_size')) is not None:
            self.gguf_writer.add_expert_shared_feed_forward_length(shared_expert_intermediate_size)
            logger.info(f"gguf: expert shared feed forward length = {shared_expert_intermediate_size}")
        # YaRN is not enabled by default
        # To enable it, please refer to this guide: https://huggingface.co/Qwen/Qwen3-30B-A3B#processing-long-texts
        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
            self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"])

    _experts: list[dict[str, Tensor]] | None = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # process the experts separately
        name = name.replace("language_model.", "")  # InternVL
        if name.startswith("mlp") or name.startswith("vision_model") or name.startswith("model.vision_tower") or name.startswith("model.multi_modal_projector"):
            # skip visual tensors
            return []
        if name.find("experts") != -1:
            n_experts = self.hparams["num_experts"]
            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for w_name in ["down_proj", "gate_proj", "up_proj"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)

                    merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"

                    new_name = self.map_tensor_name(merged_name)

                    tensors.append((new_name, data_torch))
                return tensors
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")


@ModelBase.register("Qwen3ForCausalLM")
class Qwen3Model(Qwen2Model):
    model_arch = gguf.MODEL_ARCH.QWEN3

    # extra logic for rerank models
    is_rerank: bool = False
    is_tied_embeddings: bool = False
    token_false_id: int | None = None
    token_true_id: int | None = None

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # track for intern-s1-mini
        hparams = ModelBase.load_hparams(self.dir_model, is_mistral_format=False)
        self.origin_hf_arch = hparams.get('architectures', [None])[0]

        # a bit hacky, but currently the only way to detect if this is a rerank model
        # ref: https://huggingface.co/Qwen/Qwen3-Reranker-0.6B
        readme_path = self.dir_model / "README.md"
        readme_text = ""
        if readme_path.exists():
            with readme_path.open("r", encoding="utf-8") as f:
                readme_text = f.read()
        if "# Qwen3-Reranker" in readme_text:
            self._find_rerank_config()

    def set_vocab(self):
        # deal with intern-s1-mini
        if self.origin_hf_arch == 'InternS1ForConditionalGeneration':
            self._set_vocab_interns1()
            return

        super().set_vocab()

    def _find_rerank_config(self):
        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(self.dir_model)

        self.is_rerank = True
        self.is_tied_embeddings = self.hparams.get("tie_word_embeddings", False)
        self.token_false_id = tokenizer.convert_tokens_to_ids("no")
        self.token_true_id = tokenizer.convert_tokens_to_ids("yes")
        self.sep_token_id = tokenizer.convert_tokens_to_ids("|")

        assert self.token_false_id is not None and self.token_true_id is not None

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        if self.is_rerank:
            self.gguf_writer.add_pooling_type(gguf.PoolingType.RANK)
            self.gguf_writer.add_classifier_output_labels(["yes", "no"])
            self.gguf_writer.add_chat_template([{
                "name": "rerank",
                "template": "<|im_start|>system\nJudge whether the Document meets the requirements based on the Query and the Instruct provided. Note that the answer can only be \"yes\" or \"no\".<|im_end|>\n"
                            "<|im_start|>user\n<Instruct>: Given a web search query, retrieve relevant passages that answer the query\n<Query>: {query}\n<Document>: {document}<|im_end|>\n"
                            "<|im_start|>assistant\n<think>\n\n</think>\n\n"
            }])

    def _get_cls_out_tensor(self, data_torch: Tensor) -> Tensor:
        # extract "yes" and "no" tokens from the output lm_head tensor
        false_row = data_torch[self.token_false_id]
        true_row = data_torch[self.token_true_id]
        return torch.stack([true_row, false_row], dim=0)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        if self.is_rerank:
            is_tied_head = self.is_tied_embeddings and "embed_tokens" in name
            is_real_head = not self.is_tied_embeddings and "lm_head" in name
            if is_tied_head or is_real_head:
                cls_out_head = (
                    gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.CLS_OUT] + ".weight",
                    self._get_cls_out_tensor(data_torch),
                )
                if is_tied_head:
                    embed = (self.map_tensor_name(name), data_torch)
                    return [cls_out_head, embed]
                if is_real_head:
                    return [cls_out_head]
        return super().modify_tensors(data_torch, name, bid)
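
# --- illustrative sketch (not part of Qwen3Model) ---------------------------
# Why: the rerank path above builds a 2-row classifier head by pulling the
# "yes" and "no" rows out of lm_head. Token ids and vocab size are toy values.
def _demo_rerank_cls_out():
    import torch

    vocab, hidden = 10, 4
    lm_head = torch.randn(vocab, hidden)
    token_true_id, token_false_id = 3, 7  # hypothetical ids for "yes"/"no"
    cls_out = torch.stack([lm_head[token_true_id], lm_head[token_false_id]], dim=0)
    assert cls_out.shape == (2, hidden)
    return cls_out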


@ModelBase.register("Qwen3MoeForCausalLM")
class Qwen3MoeModel(Qwen2MoeModel):
    model_arch = gguf.MODEL_ARCH.QWEN3MOE

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        hparams = ModelBase.load_hparams(self.dir_model, False)
        self.origin_hf_arch = hparams.get('architectures', [None])[0]

    def set_vocab(self):
        # deal with intern-s1
        if self.origin_hf_arch == 'InternS1ForConditionalGeneration':
            self._set_vocab_interns1()
            return

        super().set_vocab()


@ModelBase.register("GPT2LMHeadModel")
class GPT2Model(TextModel):
    model_arch = gguf.MODEL_ARCH.GPT2

    def set_gguf_parameters(self):
        self.gguf_writer.add_block_count(self.hparams["n_layer"])
        self.gguf_writer.add_context_length(self.hparams["n_ctx"])
        self.gguf_writer.add_embedding_length(self.hparams["n_embd"])
        self.gguf_writer.add_feed_forward_length(4 * self.hparams["n_embd"])
        self.gguf_writer.add_head_count(self.hparams["n_head"])
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        tensors: list[tuple[str, Tensor]] = []

        # we don't need these
        if name.endswith((".attn.bias", ".attn.masked_bias")):
            return tensors

        # Conv1D weights are stored transposed relative to nn.Linear
        if name.endswith((".c_attn.weight", ".c_proj.weight", ".c_fc.weight")):
            data_torch = data_torch.transpose(1, 0)

        new_name = self.map_tensor_name(name)

        tensors.append((new_name, data_torch))

        return tensors
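
# --- illustrative sketch (not part of GPT2Model) ----------------------------
# Why: GPT-2 checkpoints store Conv1D weights as (n_in, n_out); the transpose
# above flips them to the (n_out, n_in) layout used for matmul weights.
def _demo_gpt2_conv1d_transpose():
    import torch

    n_in, n_out = 3, 5
    w = torch.randn(n_in, n_out)  # HF Conv1D layout
    assert w.transpose(1, 0).shape == (n_out, n_in)
    return w.transpose(1, 0)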


@ModelBase.register("PhiForCausalLM")
class Phi2Model(TextModel):
    model_arch = gguf.MODEL_ARCH.PHI2

    def set_gguf_parameters(self):
        block_count = self.find_hparam(["num_hidden_layers", "n_layer"])

        rot_pct = self.find_hparam(["partial_rotary_factor"])
        n_embd = self.find_hparam(["hidden_size", "n_embd"])
        n_head = self.find_hparam(["num_attention_heads", "n_head"])

        self.gguf_writer.add_context_length(self.find_hparam(["n_positions", "max_position_embeddings"]))

        self.gguf_writer.add_embedding_length(n_embd)
        self.gguf_writer.add_feed_forward_length(4 * n_embd)
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_head_count(n_head)
        self.gguf_writer.add_head_count_kv(n_head)
        self.gguf_writer.add_layer_norm_eps(self.find_hparam(["layer_norm_epsilon", "layer_norm_eps"]))
        self.gguf_writer.add_rope_dimension_count(int(rot_pct * n_embd) // n_head)
        self.gguf_writer.add_file_type(self.ftype)
        self.gguf_writer.add_add_bos_token(False)
  3328. @ModelBase.register("Phi3ForCausalLM")
  3329. class Phi3MiniModel(TextModel):
  3330. model_arch = gguf.MODEL_ARCH.PHI3
  3331. def set_vocab(self):
  3332. # Phi-4 model uses GPT2Tokenizer
  3333. tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
  3334. if tokenizer_config_file.is_file():
  3335. with open(tokenizer_config_file, "r", encoding="utf-8") as f:
  3336. tokenizer_config_json = json.load(f)
  3337. tokenizer_class = tokenizer_config_json['tokenizer_class']
  3338. if tokenizer_class == 'GPT2Tokenizer':
  3339. return self._set_vocab_gpt2()
  3340. from sentencepiece import SentencePieceProcessor
  3341. tokenizer_path = self.dir_model / 'tokenizer.model'
  3342. if not tokenizer_path.is_file():
  3343. raise ValueError(f'Error: Missing {tokenizer_path}')
  3344. tokenizer = SentencePieceProcessor()
  3345. tokenizer.LoadFromFile(str(tokenizer_path))
  3346. vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())
  3347. tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
  3348. scores: list[float] = [-10000.0] * vocab_size
  3349. toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size
  3350. for token_id in range(tokenizer.vocab_size()):
  3351. piece = tokenizer.IdToPiece(token_id)
  3352. text = piece.encode("utf-8")
  3353. score = tokenizer.GetScore(token_id)
  3354. toktype = SentencePieceTokenTypes.NORMAL
  3355. if tokenizer.IsUnknown(token_id):
  3356. toktype = SentencePieceTokenTypes.UNKNOWN
  3357. elif tokenizer.IsControl(token_id):
  3358. toktype = SentencePieceTokenTypes.CONTROL
  3359. elif tokenizer.IsUnused(token_id):
  3360. toktype = SentencePieceTokenTypes.UNUSED
  3361. elif tokenizer.IsByte(token_id):
  3362. toktype = SentencePieceTokenTypes.BYTE
  3363. tokens[token_id] = text
  3364. scores[token_id] = score
  3365. toktypes[token_id] = toktype
  3366. added_tokens_file = self.dir_model / 'added_tokens.json'
  3367. if added_tokens_file.is_file():
  3368. with open(added_tokens_file, "r", encoding="utf-8") as f:
  3369. added_tokens_json = json.load(f)
  3370. for key in added_tokens_json:
  3371. token_id = added_tokens_json[key]
  3372. if token_id >= vocab_size:
  3373. logger.debug(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
  3374. continue
  3375. tokens[token_id] = key.encode("utf-8")
  3376. scores[token_id] = -1000.0
  3377. toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
  3378. tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
  3379. if tokenizer_config_file.is_file():
  3380. with open(tokenizer_config_file, "r", encoding="utf-8") as f:
  3381. tokenizer_config_json = json.load(f)
  3382. added_tokens_decoder = tokenizer_config_json.get("added_tokens_decoder", {})
  3383. for token_id, foken_data in added_tokens_decoder.items():
  3384. token_id = int(token_id)
  3385. token = foken_data["content"].encode("utf-8")
  3386. if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
  3387. if tokens[token_id] != token:
  3388. logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token.decode("utf-8")!r}')
  3389. tokens[token_id] = token
  3390. scores[token_id] = -1000.0
  3391. toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
  3392. if foken_data.get("special"):
  3393. toktypes[token_id] = SentencePieceTokenTypes.CONTROL
  3394. tokenizer_file = self.dir_model / 'tokenizer.json'
  3395. if tokenizer_file.is_file():
  3396. with open(tokenizer_file, "r", encoding="utf-8") as f:
  3397. tokenizer_json = json.load(f)
  3398. added_tokens = tokenizer_json.get("added_tokens", [])
  3399. for foken_data in added_tokens:
  3400. token_id = int(foken_data["id"])
  3401. token = foken_data["content"].encode("utf-8")
  3402. if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
  3403. if tokens[token_id] != token:
  3404. logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token.decode("utf-8")!r}')
  3405. tokens[token_id] = token
  3406. scores[token_id] = -1000.0
  3407. toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
  3408. if foken_data.get("special"):
  3409. toktypes[token_id] = SentencePieceTokenTypes.CONTROL
  3410. self.gguf_writer.add_tokenizer_model("llama")
  3411. self.gguf_writer.add_tokenizer_pre("default")
  3412. self.gguf_writer.add_token_list(tokens)
  3413. self.gguf_writer.add_token_scores(scores)
  3414. self.gguf_writer.add_token_types(toktypes)
  3415. special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
  3416. special_vocab.add_to_gguf(self.gguf_writer)
  3417. def set_gguf_parameters(self):
  3418. block_count = self.find_hparam(["num_hidden_layers", "n_layer"])
  3419. n_embd = self.find_hparam(["hidden_size", "n_embd"])
  3420. n_head = self.find_hparam(["num_attention_heads", "n_head"])
  3421. n_head_kv = self.find_hparam(["num_key_value_heads", "n_head_kv"])
  3422. rms_eps = self.find_hparam(["rms_norm_eps"])
  3423. max_pos_embds = self.find_hparam(["n_positions", "max_position_embeddings"])
  3424. orig_max_pos_embds = self.find_hparam(["original_max_position_embeddings"])
  3425. rot_pct = self.hparams.get("partial_rotary_factor", 1.0)
  3426. rope_dims = int(rot_pct * n_embd) // n_head
  3427. self.gguf_writer.add_context_length(max_pos_embds)
  3428. self.gguf_writer.add_rope_scaling_orig_ctx_len(orig_max_pos_embds)
  3429. self.gguf_writer.add_embedding_length(n_embd)
  3430. self.gguf_writer.add_feed_forward_length(self.find_hparam(["intermediate_size"]))
  3431. self.gguf_writer.add_block_count(block_count)
  3432. self.gguf_writer.add_head_count(n_head)
  3433. self.gguf_writer.add_head_count_kv(n_head_kv)
  3434. self.gguf_writer.add_layer_norm_rms_eps(rms_eps)
  3435. self.gguf_writer.add_rope_dimension_count(rope_dims)
  3436. self.gguf_writer.add_rope_freq_base(self.find_hparam(["rope_theta"]))
  3437. self.gguf_writer.add_file_type(self.ftype)
  3438. sliding_window = self.hparams.get("sliding_window")
  3439. # use zero value of sliding_window to distinguish Phi-4 from other PHI3 models
  3440. if sliding_window is None:
  3441. sliding_window = 0
  3442. self.gguf_writer.add_sliding_window(sliding_window)
    def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
        n_embd = self.find_hparam(["hidden_size", "n_embd"])
        n_head = self.find_hparam(["num_attention_heads", "n_head"])
        max_pos_embds = self.find_hparam(["n_positions", "max_position_embeddings"])
        orig_max_pos_embds = self.find_hparam(["original_max_position_embeddings"])
        rot_pct = self.hparams.get("partial_rotary_factor", 1.0)
        rope_dims = int(rot_pct * n_embd) // n_head

        # write rope scaling for long context (128k) model
        rope_scaling = self.find_hparam(['rope_scaling'], True)
        if rope_scaling is None:
            return

        scale = max_pos_embds / orig_max_pos_embds

        rope_scaling_type = rope_scaling.get('rope_type', rope_scaling.get('type', '')).lower()
        if len(rope_scaling_type) == 0:
            raise KeyError('Missing the required key rope_scaling.type')

        if rope_scaling_type == 'su' or rope_scaling_type == 'longrope':
            attn_factor = math.sqrt(1 + math.log(scale) / math.log(orig_max_pos_embds)) if scale > 1.0 else 1.0
        elif rope_scaling_type == 'yarn':
            attn_factor = 0.1 * math.log(scale) + 1.0 if scale > 1.0 else 1.0
        else:
            raise NotImplementedError(f'The rope scaling type {rope_scaling_type} is not supported yet')

        self.gguf_writer.add_rope_scaling_attn_factors(attn_factor)

        long_factors = rope_scaling.get('long_factor', None)
        short_factors = rope_scaling.get('short_factor', None)

        if long_factors is None or short_factors is None:
            raise KeyError('Missing the required key rope_scaling.long_factor or rope_scaling.short_factor')

        if len(long_factors) != len(short_factors) or len(long_factors) != rope_dims / 2:
            raise ValueError(f'The length of rope long and short factors must be {rope_dims / 2}. long_factors = {len(long_factors)}, short_factors = {len(short_factors)}.')

        yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FACTORS_LONG), torch.tensor(long_factors, dtype=torch.float32))
        yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FACTORS_SHORT), torch.tensor(short_factors, dtype=torch.float32))
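

# PhiMoE stores each expert's w1/w2/w3 as separate 2D tensors; the converter
# below buffers them per layer and stacks them into a single 3D tensor of
# shape (n_expert, rows, cols) per projection, which is the layout llama.cpp
# expects for MoE weights.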
  3473. @ModelBase.register("PhiMoEForCausalLM")
  3474. class PhiMoeModel(Phi3MiniModel):
  3475. model_arch = gguf.MODEL_ARCH.PHIMOE
  3476. _experts: list[dict[str, Tensor]] | None = None
  3477. def set_gguf_parameters(self):
  3478. super().set_gguf_parameters()
  3479. self.gguf_writer.add_expert_used_count(self.hparams["num_experts_per_tok"])
  3480. self.gguf_writer.add_expert_count(self.hparams["num_local_experts"])
  3481. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  3482. # process the experts separately
  3483. if name.find("block_sparse_moe.experts") != -1:
  3484. n_experts = self.hparams["num_local_experts"]
  3485. assert bid is not None
  3486. if self._experts is None:
  3487. self._experts = [{} for _ in range(self.block_count)]
  3488. self._experts[bid][name] = data_torch
  3489. if len(self._experts[bid]) >= n_experts * 3:
  3490. tensors: list[tuple[str, Tensor]] = []
  3491. # merge the experts into a single 3d tensor
  3492. for w_name in ["w1", "w2", "w3"]:
  3493. datas: list[Tensor] = []
  3494. for xid in range(n_experts):
  3495. ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.{w_name}.weight"
  3496. datas.append(self._experts[bid][ename])
  3497. del self._experts[bid][ename]
  3498. data_torch = torch.stack(datas, dim=0)
  3499. merged_name = f"model.layers.{bid}.block_sparse_moe.experts.{w_name}.weight"
  3500. new_name = self.map_tensor_name(merged_name)
  3501. tensors.append((new_name, data_torch))
  3502. return tensors
  3503. else:
  3504. return []
  3505. return [(self.map_tensor_name(name), data_torch)]
  3506. def prepare_tensors(self):
  3507. super().prepare_tensors()
  3508. if self._experts is not None:
  3509. # flatten `list[dict[str, Tensor]]` into `list[str]`
  3510. experts = [k for d in self._experts for k in d.keys()]
  3511. if len(experts) > 0:
  3512. raise ValueError(f"Unprocessed experts: {experts}")
  3513. @ModelBase.register("PlamoForCausalLM")
  3514. class PlamoModel(TextModel):
  3515. model_arch = gguf.MODEL_ARCH.PLAMO
  3516. def set_vocab(self):
  3517. self._set_vocab_sentencepiece()
  3518. def set_gguf_parameters(self):
  3519. hparams = self.hparams
  3520. block_count = hparams["num_hidden_layers"]
  3521. self.gguf_writer.add_context_length(4096) # not in config.json
  3522. self.gguf_writer.add_embedding_length(hparams["hidden_size"])
  3523. self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
  3524. self.gguf_writer.add_block_count(block_count)
  3525. self.gguf_writer.add_head_count(hparams["num_attention_heads"])
  3526. self.gguf_writer.add_head_count_kv(5) # hparams["num_key_value_heads"]) is wrong
  3527. self.gguf_writer.add_layer_norm_rms_eps(hparams["rms_norm_eps"])
  3528. self.gguf_writer.add_file_type(self.ftype)
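
    # PLaMo stores its Q and output projections in an interleaved head order;
    # the two helpers below permute the weights so grouped-query attention can
    # broadcast K/V heads in ggml_mul_mat. The hard-coded 5120x5120 shapes
    # (factored as 8 x 5 x 128 = 5120) match the PLaMo checkpoint this
    # converter targets; the asserts guard that assumption.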
    def shuffle_attn_q_weight(self, data_torch):
        assert data_torch.size() == (5120, 5120)
        data_torch = data_torch.reshape(8, 5, 128, 5120)
        data_torch = torch.permute(data_torch, (1, 0, 2, 3))
        data_torch = torch.reshape(data_torch, (5120, 5120))
        return data_torch

    def shuffle_attn_output_weight(self, data_torch):
        assert data_torch.size() == (5120, 5120)
        data_torch = data_torch.reshape(5120, 8, 5, 128)
        data_torch = torch.permute(data_torch, (0, 2, 1, 3))
        data_torch = torch.reshape(data_torch, (5120, 5120))
        return data_torch

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        new_name = self.map_tensor_name(name)

        # shuffle for broadcasting of gqa in ggml_mul_mat
        if new_name.endswith("attn_q.weight"):
            data_torch = self.shuffle_attn_q_weight(data_torch)
        elif new_name.endswith("attn_output.weight"):
            data_torch = self.shuffle_attn_output_weight(data_torch)

        return [(new_name, data_torch)]
  3550. @ModelBase.register("Plamo2ForCausalLM", "PLaMo2ForCausalLM")
  3551. class Plamo2Model(TextModel):
  3552. model_arch = gguf.MODEL_ARCH.PLAMO2
  3553. def set_vocab(self):
  3554. # PLaMo 2 uses a custom tokenizer with a .jsonl file
  3555. # We need to handle this specially
  3556. tokenizer_jsonl_path = self.dir_model / "tokenizer.jsonl"
  3557. tokenizer_config_path = self.dir_model / "tokenizer_config.json"
  3558. if not tokenizer_jsonl_path.is_file():
  3559. raise FileNotFoundError(f"PLaMo 2 tokenizer file not found: {tokenizer_jsonl_path}")
  3560. # Load tokenizer config
  3561. with open(tokenizer_config_path, 'r', encoding='utf-8') as f:
  3562. tokenizer_config = json.load(f)
  3563. # Load tokens from JSONL file (actually a list format)
  3564. tokens = []
  3565. scores = []
  3566. toktypes = []
  3567. with open(tokenizer_jsonl_path, 'r', encoding='utf-8') as f:
  3568. for line_num, line in enumerate(f):
  3569. if line.strip():
  3570. token_data = json.loads(line)
  3571. # Format: [token, score, type, ?, ?, ?, ?]
  3572. token = token_data[0].encode("utf-8")
  3573. score = float(token_data[1])
  3574. token_type_str = token_data[2] if len(token_data) > 2 else "NORMAL"
  3575. tokens.append(token)
  3576. scores.append(score)
  3577. # Map token type strings to GGUF token types
  3578. if token_type_str == "UNKNOWN":
  3579. toktypes.append(gguf.TokenType.UNKNOWN)
  3580. elif token_type_str == "CONTROL":
  3581. toktypes.append(gguf.TokenType.CONTROL)
  3582. elif token_type_str == "BYTE":
  3583. toktypes.append(gguf.TokenType.BYTE)
  3584. else:
  3585. # Check for PLaMo-2 special tokens
  3586. token_str = token_data[0]
  3587. if token_str.startswith("<|plamo:") and token_str.endswith("|>"):
  3588. toktypes.append(gguf.TokenType.CONTROL)
  3589. else:
  3590. toktypes.append(gguf.TokenType.NORMAL)
  3591. vocab_size = self.hparams["vocab_size"]
  3592. if vocab_size > len(tokens):
  3593. pad_count = vocab_size - len(tokens)
  3594. logger.debug(f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]")
  3595. for i in range(1, pad_count + 1):
  3596. tokens.append(bytes(f"[PAD{i}]", encoding="utf-8"))
  3597. scores.append(-1000.0)
  3598. toktypes.append(gguf.TokenType.UNUSED)
  3599. # Use "plamo2" tokenizer type for PLaMo-2's custom Aho-Corasick tokenizer
  3600. self.gguf_writer.add_tokenizer_model("plamo2")
  3601. self.gguf_writer.add_tokenizer_pre("default")
  3602. self.gguf_writer.add_token_list(tokens)
  3603. self.gguf_writer.add_token_scores(scores)
  3604. self.gguf_writer.add_token_types(toktypes)
  3605. # Add special tokens from config
  3606. if "bos_token" in tokenizer_config and tokenizer_config["bos_token"] is not None:
  3607. token_id = tokens.index(tokenizer_config["bos_token"].encode("utf-8"))
  3608. self.gguf_writer.add_bos_token_id(token_id)
  3609. if "eos_token" in tokenizer_config and tokenizer_config["eos_token"] is not None:
  3610. token_id = tokens.index(tokenizer_config["eos_token"].encode("utf-8"))
  3611. self.gguf_writer.add_eos_token_id(token_id)
  3612. if "pad_token" in tokenizer_config and tokenizer_config["pad_token"] is not None:
  3613. token_id = tokens.index(tokenizer_config["pad_token"].encode("utf-8"))
  3614. self.gguf_writer.add_pad_token_id(token_id)
  3615. if "sep_token" in tokenizer_config and tokenizer_config["sep_token"] is not None:
  3616. token_id = tokens.index(tokenizer_config["sep_token"].encode("utf-8"))
  3617. self.gguf_writer.add_sep_token_id(token_id)
  3618. if "unk_token" in tokenizer_config and tokenizer_config["unk_token"] is not None:
  3619. token_id = tokens.index(tokenizer_config["unk_token"].encode("utf-8"))
  3620. self.gguf_writer.add_unk_token_id(token_id)
  3621. # Add <|plamo:op|> as EOT to ensure appropriate end of generation
  3622. self.gguf_writer.add_eot_token_id(4)
  3623. self.gguf_writer.add_add_space_prefix(False)
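
    # PLaMo 2 interleaves Mamba (SSM) and attention blocks. Per-layer head
    # counts are written as arrays, with 0 heads marking a Mamba layer; for
    # example, with the default mamba_step = 2 a 4-layer model would get
    # attention head counts [0, 32, 0, 32] (attention on every second layer).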
    def set_gguf_parameters(self):
        hparams = self.hparams
        block_count = hparams["num_hidden_layers"]
        self.gguf_writer.add_vocab_size(self.hparams["vocab_size"])

        # Which layers are Mamba layers
        # PLaMo 2 uses mamba_step to indicate the pattern (e.g., 2 means every other layer)
        # This logic matches modeling_plamo.py's is_mamba function
        mamba_step = hparams.get("mamba_step", 2)
        mamba_enabled = hparams.get("mamba_enabled", True)
        num_key_value_heads = []
        num_attention_heads = []

        if mamba_enabled:
            for i in range(block_count):
                if block_count <= (mamba_step // 2):
                    # use attention in last layer
                    is_mamba = (i != block_count - 1)
                else:
                    is_mamba = (i % mamba_step) != (mamba_step // 2)
                if is_mamba:
                    num_key_value_heads.append(0)
                    num_attention_heads.append(0)
                else:
                    num_key_value_heads.append(hparams.get("num_key_value_heads", 4))
                    num_attention_heads.append(hparams.get("num_attention_heads", 32))

        if num_key_value_heads and num_attention_heads:
            self.gguf_writer.add_head_count_kv(num_key_value_heads)
            self.gguf_writer.add_head_count(num_attention_heads)

        self.gguf_writer.add_context_length(hparams.get("max_position_embeddings", 2048))
        self.gguf_writer.add_embedding_length(hparams.get("hidden_size", 4096))
        self.gguf_writer.add_key_length(hparams.get("hidden_size_per_head", 128))
        self.gguf_writer.add_value_length(hparams.get("hidden_size_per_head", 128))
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_layer_norm_rms_eps(hparams.get("rms_norm_eps", 1e-06))
        self.gguf_writer.add_rope_freq_base(hparams.get("rope_theta", 10000))

        # Mamba parameters
        self.gguf_writer.add_ssm_state_size(hparams.get("mamba_d_state", 64))
        self.gguf_writer.add_ssm_conv_kernel(hparams.get("mamba_d_conv", 4))
        self.gguf_writer.add_ssm_time_step_rank(hparams.get("mamba_num_heads", 64))
        intermediate_size = hparams.get("mamba_num_heads", 64) * hparams.get("hidden_size_per_head", 128)
        self.gguf_writer.add_ssm_inner_size(intermediate_size)
        self.gguf_writer.add_ssm_group_count(0)

        # MLP feed forward parameters (for attention layers)
        self.gguf_writer.add_feed_forward_length(hparams.get("intermediate_size", 13312))
        self.gguf_writer.add_file_type(self.ftype)
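
    # modify_tensors() below does two things: it renames PLaMo 2 tensor
    # suffixes to the names the gguf tensor map expects, and it folds the
    # model's normalization conventions into the exported weights. The SSM
    # decay is stored as A_log and exported as A = -exp(A_log); the +1.0 and
    # fractional offsets appear to mirror the scale shifts of the reference
    # modeling code, so llama.cpp can apply plain RMSNorm at inference time.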
    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        if name.endswith(".A_log"):
            data_torch = -torch.exp(data_torch)
        elif name.endswith(".dt_bias"):
            name = name.rpartition(".dt_bias")[0] + ".dt_proj.bias"
        elif name.endswith(".dt_norm_weight"):
            name = name.rpartition(".dt_norm_weight")[0] + ".dt_norm.weight"
        elif name.endswith(".B_norm_weight"):
            name = name.rpartition(".B_norm_weight")[0] + ".B_norm.weight"
        elif name.endswith(".C_norm_weight"):
            name = name.rpartition(".C_norm_weight")[0] + ".C_norm.weight"
        elif name.endswith(".k_weight"):
            name = name.rpartition(".k_weight")[0] + ".k.weight"
        elif name.endswith(".q_weight"):
            name = name.rpartition(".q_weight")[0] + ".q.weight"
        elif name.endswith(".conv1d.weight"):
            data_torch = torch.squeeze(data_torch)  # squeeze out the singleton middle dimension
            assert data_torch.ndim == 2
        elif name.endswith(".pre_mixer_norm.weight"):
            data_torch += 1.0
        elif name.endswith(".post_mixer_norm.weight"):
            data_torch += 1.0 / 5
        elif name.endswith(".pre_mlp_norm.weight"):
            data_torch += 1.0
        elif name.endswith(".post_mlp_norm.weight"):
            data_torch += 1.0 / (5**1.5)
        elif name.endswith(".norm.weight"):
            data_torch += 1.0

        new_name = self.map_tensor_name(name)

        return [(new_name, data_torch)]
  3699. @ModelBase.register("CodeShellForCausalLM")
  3700. class CodeShellModel(TextModel):
  3701. model_arch = gguf.MODEL_ARCH.CODESHELL
  3702. def set_gguf_parameters(self):
  3703. block_count = self.hparams["n_layer"]
  3704. self.gguf_writer.add_context_length(self.hparams["n_positions"])
  3705. self.gguf_writer.add_embedding_length(self.hparams["n_embd"])
  3706. self.gguf_writer.add_feed_forward_length(4 * self.hparams["n_embd"])
  3707. self.gguf_writer.add_block_count(block_count)
  3708. self.gguf_writer.add_head_count(self.hparams["n_head"])
  3709. self.gguf_writer.add_head_count_kv(self.hparams["num_query_groups"])
  3710. self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
  3711. self.gguf_writer.add_file_type(self.ftype)
  3712. self.gguf_writer.add_rope_freq_base(10000.0)
  3713. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
  3714. self.gguf_writer.add_rope_scaling_factor(1.0)
  3715. @ModelBase.register("InternLM2ForCausalLM")
  3716. class InternLM2Model(TextModel):
  3717. model_arch = gguf.MODEL_ARCH.INTERNLM2
  3718. def set_vocab(self):
  3719. # (TODO): Is there a better way?
  3720. # Copy from _set_vocab_sentencepiece, The only difference is that we will treat the character
  3721. # \x00 specially and convert it into an emoji character to prevent it from being mistakenly
  3722. # recognized as an empty string in C++.
  3723. from sentencepiece import SentencePieceProcessor
  3724. from sentencepiece import sentencepiece_model_pb2 as model
  3725. tokenizer_path = self.dir_model / 'tokenizer.model'
  3726. tokens: list[bytes] = []
  3727. scores: list[float] = []
  3728. toktypes: list[int] = []
  3729. if not tokenizer_path.is_file():
  3730. logger.error(f'Error: Missing {tokenizer_path}')
  3731. sys.exit(1)
  3732. sentencepiece_model = model.ModelProto() # pyright: ignore[reportAttributeAccessIssue]
  3733. sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read())
  3734. add_prefix = sentencepiece_model.normalizer_spec.add_dummy_prefix
  3735. tokenizer = SentencePieceProcessor()
  3736. tokenizer.LoadFromFile(str(tokenizer_path))
  3737. vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())
  3738. for token_id in range(vocab_size):
  3739. piece = tokenizer.IdToPiece(token_id)
  3740. text = piece.encode("utf-8")
  3741. score = tokenizer.GetScore(token_id)
  3742. if text == b"\x00":
  3743. # (TODO): fixme
  3744. # Hack here and replace the \x00 characters.
  3745. logger.warning(f"InternLM2 convert token '{text}' to '🐉'!")
  3746. text = "🐉".encode("utf-8")
  3747. toktype = SentencePieceTokenTypes.NORMAL
  3748. if tokenizer.IsUnknown(token_id):
  3749. toktype = SentencePieceTokenTypes.UNKNOWN
  3750. elif tokenizer.IsControl(token_id):
  3751. toktype = SentencePieceTokenTypes.CONTROL
  3752. elif tokenizer.IsUnused(token_id):
  3753. toktype = SentencePieceTokenTypes.UNUSED
  3754. elif tokenizer.IsByte(token_id):
  3755. toktype = SentencePieceTokenTypes.BYTE
  3756. # take care of ununsed raw token
  3757. if piece.startswith('[UNUSED'):
  3758. toktype = SentencePieceTokenTypes.UNUSED
  3759. tokens.append(text)
  3760. scores.append(score)
  3761. toktypes.append(toktype)
  3762. added_tokens_file = self.dir_model / 'added_tokens.json'
  3763. if added_tokens_file.is_file():
  3764. with open(added_tokens_file, "r", encoding="utf-8") as f:
  3765. added_tokens_json = json.load(f)
  3766. for key in added_tokens_json:
  3767. tokens.append(key.encode("utf-8"))
  3768. scores.append(-1000.0)
  3769. toktypes.append(SentencePieceTokenTypes.USER_DEFINED)
  3770. chat_eos_token = '<|im_end|>'
  3771. chat_eos_token_id = None
  3772. tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
  3773. if tokenizer_config_file.is_file():
  3774. with open(tokenizer_config_file, "r", encoding="utf-8") as f:
  3775. tokenizer_config_json = json.load(f)
  3776. added_tokens_decoder = tokenizer_config_json.get("added_tokens_decoder", {})
  3777. for token_id, foken_data in added_tokens_decoder.items():
  3778. token_id = int(token_id)
  3779. token = foken_data["content"]
  3780. if token == chat_eos_token:
  3781. chat_eos_token_id = token_id
  3782. token = token.encode("utf-8")
  3783. if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
  3784. if tokens[token_id] != token:
  3785. logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token.decode("utf-8")!r}')
  3786. tokens[token_id] = token
  3787. scores[token_id] = -1000.0
  3788. toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
  3789. if foken_data.get("special"):
  3790. toktypes[token_id] = SentencePieceTokenTypes.CONTROL
  3791. tokenizer_file = self.dir_model / 'tokenizer.json'
  3792. if tokenizer_file.is_file():
  3793. with open(tokenizer_file, "r", encoding="utf-8") as f:
  3794. tokenizer_json = json.load(f)
  3795. added_tokens = tokenizer_json.get("added_tokens", [])
  3796. for foken_data in added_tokens:
  3797. token_id = int(foken_data["id"])
  3798. token = foken_data["content"]
  3799. if token == chat_eos_token:
  3800. chat_eos_token_id = token_id
  3801. token = token.encode("utf-8")
  3802. if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
  3803. if tokens[token_id] != token:
  3804. logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token.decode("utf-8")!r}')
  3805. tokens[token_id] = token
  3806. scores[token_id] = -1000.0
  3807. toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
  3808. if foken_data.get("special"):
  3809. toktypes[token_id] = SentencePieceTokenTypes.CONTROL
  3810. self.gguf_writer.add_tokenizer_model("llama")
  3811. self.gguf_writer.add_tokenizer_pre("default")
  3812. self.gguf_writer.add_token_list(tokens)
  3813. self.gguf_writer.add_token_scores(scores)
  3814. self.gguf_writer.add_token_types(toktypes)
  3815. self.gguf_writer.add_add_space_prefix(add_prefix)
  3816. special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
  3817. old_eos = special_vocab.special_token_ids["eos"]
  3818. if chat_eos_token_id is not None:
  3819. # For the chat model, we replace the eos with '<|im_end|>'.
  3820. # TODO: this is a hack, should be fixed
  3821. # https://github.com/ggml-org/llama.cpp/pull/6745#issuecomment-2067687048
  3822. special_vocab.special_token_ids["eos"] = chat_eos_token_id
  3823. logger.warning(f"Replace eos:{old_eos} with a special token:{chat_eos_token_id}"
  3824. " in chat mode so that the conversation can end normally.")
  3825. special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"])
        self.gguf_writer.add_block_count(self.hparams["num_hidden_layers"])
        self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
        self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
        self.gguf_writer.add_rope_freq_base(self.hparams["rope_theta"])
        self.gguf_writer.add_head_count(self.hparams["num_attention_heads"])
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
        self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"])
        self.gguf_writer.add_file_type(self.ftype)

        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "linear" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
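
    # InternLM2 fuses Q, K and V into a single "wqkv" tensor, grouped so that
    # each KV group carries its q_per_kv query heads followed by one K and one
    # V head. Reshaping to (num_groups, q_per_kv + 2, head_dim, n_embd) lets
    # the code below slice queries as [:, :q_per_kv], K as [:, -2] and V as
    # [:, -1] before applying the usual LLaMA rope permutation to Q and K.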
    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        num_heads = self.hparams["num_attention_heads"]
        num_kv_heads = self.hparams["num_key_value_heads"]
        n_embd = self.hparams["hidden_size"]
        q_per_kv = num_heads // num_kv_heads
        head_dim = n_embd // num_heads
        num_groups = num_heads // q_per_kv

        name = name.replace("language_model.", "")  # InternVL
        if name.startswith("mlp") or name.startswith("vision_model"):
            # skip visual tensors
            return []

        if bid is not None and f"model.layers.{bid}.attention.wqkv" in name:
            qkv = data_torch

            qkv = qkv.reshape((num_groups, q_per_kv + 2, head_dim, n_embd))
            q, k, v = qkv[:, : q_per_kv], qkv[:, -2], qkv[:, -1]

            # The model weights of q and k require additional reshape.
            q = LlamaModel.permute(q.reshape((-1, q.shape[-1])), num_heads, num_heads)
            k = LlamaModel.permute(k.reshape((-1, k.shape[-1])), num_heads, num_kv_heads)
            v = v.reshape((-1, v.shape[-1]))

            return [
                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid), q),
                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid), k),
                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid), v),
            ]
        else:
            return [(self.map_tensor_name(name), data_torch)]
  3866. @ModelBase.register("InternLM3ForCausalLM")
  3867. class InternLM3Model(TextModel):
  3868. model_arch = gguf.MODEL_ARCH.LLAMA
  3869. def set_vocab(self):
  3870. tokens, scores, toktypes = self._create_vocab_sentencepiece()
  3871. self.gguf_writer.add_tokenizer_model("llama")
  3872. self.gguf_writer.add_tokenizer_pre("default")
  3873. self.gguf_writer.add_token_list(tokens)
  3874. self.gguf_writer.add_token_scores(scores)
  3875. self.gguf_writer.add_token_types(toktypes)
  3876. special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
  3877. tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
  3878. if tokenizer_config_file.is_file():
  3879. with open(tokenizer_config_file, "r", encoding="utf-8") as f:
  3880. tokenizer_config_json = json.load(f)
  3881. if "add_prefix_space" in tokenizer_config_json:
  3882. self.gguf_writer.add_add_space_prefix(tokenizer_config_json["add_prefix_space"])
  3883. if "added_tokens_decoder" in tokenizer_config_json:
  3884. for token_id, token_data in tokenizer_config_json["added_tokens_decoder"].items():
  3885. if token_data.get("special"):
  3886. token_id = int(token_id)
  3887. token = token_data["content"]
  3888. special_vocab._set_special_token(token, token_id)
  3889. # update eos token
  3890. if token == '<|im_end|>' and "eos" in special_vocab.special_token_ids:
  3891. special_vocab.special_token_ids["eos"] = token_id
  3892. special_vocab.add_to_gguf(self.gguf_writer)
  3893. def set_gguf_parameters(self):
  3894. super().set_gguf_parameters()
  3895. hparams = self.hparams
  3896. self.gguf_writer.add_vocab_size(hparams["vocab_size"])
  3897. if (rope_dim := hparams.get("head_dim")) is None:
  3898. rope_dim = hparams["hidden_size"] // hparams["num_attention_heads"]
  3899. self.gguf_writer.add_rope_dimension_count(rope_dim)
  3900. rope_scaling = self.hparams.get("rope_scaling") or {}
  3901. if rope_scaling.get("rope_type", rope_scaling.get("type")) == "linear" and "factor" in rope_scaling:
  3902. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
  3903. self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
  3904. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  3905. n_head = self.hparams["num_attention_heads"]
  3906. n_kv_head = self.hparams.get("num_key_value_heads")
  3907. name = name.replace("language_model.", "") # InternVL
  3908. if name.startswith("mlp") or name.startswith("vision_model"):
  3909. # skip visual tensors
  3910. return []
  3911. if name.endswith(("q_proj.weight", "q_proj.bias")):
  3912. data_torch = LlamaModel.permute(data_torch, n_head, n_head)
  3913. if name.endswith(("k_proj.weight", "k_proj.bias")):
  3914. data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)
  3915. return [(self.map_tensor_name(name), data_torch)]
  3916. @ModelBase.register("BertModel", "BertForMaskedLM", "CamembertModel", "BertForSequenceClassification")
  3917. class BertModel(TextModel):
  3918. model_arch = gguf.MODEL_ARCH.BERT
  3919. def __init__(self, *args, **kwargs):
  3920. super().__init__(*args, **kwargs)
  3921. self.vocab_size = None
  3922. if cls_out_labels := self.hparams.get("id2label"):
  3923. if len(cls_out_labels) == 2 and cls_out_labels[0] == "LABEL_0":
  3924. # Remove dummy labels added by AutoConfig
  3925. cls_out_labels = None
  3926. self.cls_out_labels = cls_out_labels
  3927. def set_gguf_parameters(self):
  3928. super().set_gguf_parameters()
  3929. self.gguf_writer.add_causal_attention(False)
  3930. self._try_set_pooling_type()
  3931. if self.cls_out_labels:
  3932. self.gguf_writer.add_classifier_output_labels([v for k, v in sorted(self.cls_out_labels.items())])
  3933. def set_vocab(self):
  3934. tokens, toktypes, tokpre = self.get_vocab_base()
  3935. self.vocab_size = len(tokens)
  3936. # we need this to validate the size of the token_type embeddings
  3937. # though currently we are passing all zeros to the token_type embeddings
  3938. # "Sequence A" or "Sequence B"
  3939. self.gguf_writer.add_token_type_count(self.hparams.get("type_vocab_size", 1))
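
        # WordPiece marks word-internal pieces with a "##" prefix, while the
        # GGUF "bert" tokenizer uses a SentencePiece-style surface form where
        # word-initial pieces carry a leading U+2581 instead. phantom() below
        # performs that mapping, e.g. "hello" -> "\u2581hello" and
        # "##ing" -> "ing", leaving bracketed specials like "[CLS]" untouched.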
        # convert to phantom space vocab
        def phantom(tok):
            if tok.startswith("[") and tok.endswith("]"):
                return tok
            if tok.startswith("##"):
                return tok[2:]
            return "\u2581" + tok
        tokens = list(map(phantom, tokens))

        # add vocab to gguf
        self.gguf_writer.add_tokenizer_model("bert")
        self.gguf_writer.add_tokenizer_pre(tokpre)
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)

        # handle special tokens
        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        if name.startswith("bert."):
            name = name[5:]

        if name.endswith(".gamma"):
            name = name[:-6] + ".weight"

        if name.endswith(".beta"):
            name = name[:-5] + ".bias"

        # we are only using BERT for embeddings so we don't need the pooling layer
        if name in ("embeddings.position_ids", "pooler.dense.weight", "pooler.dense.bias"):
            return []  # we don't need these

        if name.startswith("cls.predictions"):
            return []

        if name.startswith("cls.seq_relationship"):
            return []

        if self.cls_out_labels:
            # For BertForSequenceClassification (direct projection layer)
            if name == "classifier.weight":
                name = "classifier.out_proj.weight"

            if name == "classifier.bias":
                name = "classifier.out_proj.bias"

        return [(self.map_tensor_name(name), data_torch)]

    def _xlmroberta_tokenizer_init(self) -> None:
        # we need the pad_token_id to know how to chop down position_embd matrix
        if (pad_token_id := self.hparams.get("pad_token_id")) is not None:
            self._position_offset = 1 + pad_token_id
            if "max_position_embeddings" in self.hparams:
                self.hparams["max_position_embeddings"] -= self._position_offset
        else:
            self._position_offset = None

    def _xlmroberta_set_vocab(self) -> None:
        # to avoid TypeError: Descriptors cannot be created directly
        # exception when importing sentencepiece_model_pb2
        os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
        from sentencepiece import SentencePieceProcessor
        from sentencepiece import sentencepiece_model_pb2 as model

        tokenizer_path = self.dir_model / 'sentencepiece.bpe.model'

        tokenizer_json = {}
        tokenizer_config_json = {}
        if not tokenizer_path.is_file():
            tokenizer_path = self.dir_model / 'tokenizer.json'
            tokenizer_config_path = self.dir_model / 'tokenizer_config.json'

            if not tokenizer_path.is_file():
                raise FileNotFoundError(f"File not found: {tokenizer_path}")

            from base64 import b64decode
            from transformers import AutoTokenizer
            tokenizer = AutoTokenizer.from_pretrained(self.dir_model)

            with open(tokenizer_path, "r", encoding="utf-8") as fp:
                tokenizer_json = json.load(fp)

            if tokenizer_config_path.is_file():
                with open(tokenizer_config_path, "r", encoding="utf-8") as fp:
                    tokenizer_config_json = json.load(fp)

            add_prefix = tokenizer.add_prefix_space
            remove_whitespaces = tokenizer.clean_up_tokenization_spaces
            precompiled_charsmap = b64decode(tokenizer_json["normalizer"]["precompiled_charsmap"])

            vocab_size = max(self.hparams.get("vocab_size", 0), tokenizer.vocab_size)
        else:
            sentencepiece_model = model.ModelProto()  # pyright: ignore[reportAttributeAccessIssue]
            sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read())
            assert sentencepiece_model.trainer_spec.model_type == 1  # UNIGRAM

            add_prefix = sentencepiece_model.normalizer_spec.add_dummy_prefix
            remove_whitespaces = sentencepiece_model.normalizer_spec.remove_extra_whitespaces
            precompiled_charsmap = sentencepiece_model.normalizer_spec.precompiled_charsmap

            tokenizer = SentencePieceProcessor()
            tokenizer.LoadFromFile(str(tokenizer_path))

            vocab_size = max(self.hparams.get("vocab_size", 0), tokenizer.vocab_size())

        tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
        scores: list[float] = [-10000.0] * vocab_size
        toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size

        if isinstance(tokenizer, SentencePieceProcessor):
            for token_id in range(tokenizer.vocab_size()):
                piece = tokenizer.IdToPiece(token_id)
                text = piece.encode("utf-8")
                score = tokenizer.GetScore(token_id)

                toktype = SentencePieceTokenTypes.NORMAL
                if tokenizer.IsUnknown(token_id):
                    toktype = SentencePieceTokenTypes.UNKNOWN
                elif tokenizer.IsControl(token_id):
                    toktype = SentencePieceTokenTypes.CONTROL
                elif tokenizer.IsUnused(token_id):
                    toktype = SentencePieceTokenTypes.UNUSED
                elif tokenizer.IsByte(token_id):
                    toktype = SentencePieceTokenTypes.BYTE

                tokens[token_id] = text
                scores[token_id] = score
                toktypes[token_id] = toktype
        else:
            added_vocab = tokenizer.get_added_vocab()
            unk_token = tokenizer_config_json.get("unk_token")
            unk_token_id = added_vocab.get(unk_token, tokenizer_json["model"].get("unk_id", 3))

            for token_id in range(tokenizer.vocab_size):
                if (piece := tokenizer._convert_id_to_token(token_id)) is not None:
                    text = piece.encode("utf-8")
                    score = tokenizer_json["model"]["vocab"][token_id][1]

                    toktype = SentencePieceTokenTypes.NORMAL
                    if token_id == unk_token_id:
                        toktype = SentencePieceTokenTypes.UNKNOWN
                    elif token_id in tokenizer.all_special_ids:
                        toktype = SentencePieceTokenTypes.CONTROL
                    elif token_id in added_vocab.values():
                        toktype = SentencePieceTokenTypes.USER_DEFINED
                    # No reliable way to detect this, but jina doesn't have any
                    # elif tokenizer.IsByte(token_id):
                    #     toktype = SentencePieceTokenTypes.BYTE

                    tokens[token_id] = text
                    scores[token_id] = score
                    toktypes[token_id] = toktype

        if isinstance(tokenizer, SentencePieceProcessor):
            # realign tokens (see HF tokenizer code)
            tokens = [b'<s>', b'<pad>', b'</s>', b'<unk>'] + tokens[3:-1]
            scores = [0.0, 0.0, 0.0, 0.0] + scores[3:-1]
            toktypes = [
                SentencePieceTokenTypes.CONTROL,
                SentencePieceTokenTypes.CONTROL,
                SentencePieceTokenTypes.CONTROL,
                SentencePieceTokenTypes.UNKNOWN,
            ] + toktypes[3:-1]

        if self.model_arch == gguf.MODEL_ARCH.NOMIC_BERT_MOE:
            # Add mask token missing from sentencepiece.bpe.model
            tokens[250001] = b'<mask>'
            scores[250001] = 0.0
            toktypes[250001] = SentencePieceTokenTypes.CONTROL

        self.gguf_writer.add_tokenizer_model("t5")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)
        self.gguf_writer.add_add_space_prefix(add_prefix)
        self.gguf_writer.add_token_type_count(self.hparams.get("type_vocab_size", 1))
        self.gguf_writer.add_remove_extra_whitespaces(remove_whitespaces)
        if precompiled_charsmap:
            self.gguf_writer.add_precompiled_charsmap(precompiled_charsmap)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)
  4091. @ModelBase.register("DistilBertModel", "DistilBertForMaskedLM", "DistilBertForSequenceClassification")
  4092. class DistilBertModel(BertModel):
  4093. model_arch = gguf.MODEL_ARCH.BERT
  4094. def set_gguf_parameters(self):
  4095. self.gguf_writer.add_layer_norm_eps(1e-12)
  4096. logger.info("gguf: layer norm epsilon = 1e-12")
  4097. super().set_gguf_parameters()
  4098. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  4099. if name.startswith("distilbert."):
  4100. name = name[11:]
  4101. # These layers act as MLM head, so we don't need them
  4102. if name.startswith("vocab_"):
  4103. return []
  4104. return super().modify_tensors(data_torch, name, bid)
  4105. @ModelBase.register("RobertaModel", "RobertaForSequenceClassification")
  4106. class RobertaModel(BertModel):
  4107. model_arch = gguf.MODEL_ARCH.BERT
  4108. def __init__(self, *args, **kwargs):
  4109. super().__init__(*args, **kwargs)
  4110. # we need the pad_token_id to know how to chop down position_embd matrix
  4111. if (pad_token_id := self.hparams.get("pad_token_id")) is not None:
  4112. self._position_offset = 1 + pad_token_id
  4113. if "max_position_embeddings" in self.hparams:
  4114. self.hparams["max_position_embeddings"] -= self._position_offset
  4115. else:
  4116. self._position_offset = None
  4117. def set_vocab(self):
  4118. """Support BPE tokenizers for roberta models"""
  4119. bpe_tok_path = self.dir_model / "tokenizer.json"
  4120. if bpe_tok_path.exists():
  4121. self._set_vocab_gpt2()
  4122. # we need this to validate the size of the token_type embeddings
  4123. # though currently we are passing all zeros to the token_type embeddings
  4124. # "Sequence A" or "Sequence B"
  4125. self.gguf_writer.add_token_type_count(self.hparams.get("type_vocab_size", 1))
  4126. else:
  4127. return super().set_vocab()
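
    # RoBERTa reserves position ids 0..pad_token_id for padding, so real
    # positions start at pad_token_id + 1. With the usual pad_token_id = 1 the
    # first two rows of position_embeddings are never used and are chopped off
    # below, matching the max_position_embeddings adjustment done in __init__.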
    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # if name starts with "roberta.", remove the prefix
        # e.g. https://huggingface.co/BAAI/bge-reranker-v2-m3/tree/main
        if name.startswith("roberta."):
            name = name[8:]

        # position embeddings start at pad_token_id + 1, so just chop down the weight tensor
        if name == "embeddings.position_embeddings.weight":
            if self._position_offset is not None:
                data_torch = data_torch[self._position_offset:, :]

        return super().modify_tensors(data_torch, name, bid)
  4138. @ModelBase.register("NomicBertModel")
  4139. class NomicBertModel(BertModel):
  4140. model_arch = gguf.MODEL_ARCH.BERT
  4141. def __init__(self, dir_model: Path, ftype: gguf.LlamaFileType, fname_out: Path, **kwargs: Any):
  4142. hparams = kwargs.pop("hparams", None)
  4143. if hparams is None:
  4144. hparams = ModelBase.load_hparams(dir_model, False)
  4145. self.is_moe = bool(hparams.get("moe_every_n_layers"))
  4146. self.model_arch = gguf.MODEL_ARCH.NOMIC_BERT_MOE if self.is_moe else gguf.MODEL_ARCH.NOMIC_BERT
  4147. super().__init__(dir_model, ftype, fname_out, hparams=hparams, **kwargs)
  4148. self._tokenizer_is_xlmroberta = self._is_tokenizer_xlmroberta()
  4149. if self._tokenizer_is_xlmroberta:
  4150. self._xlmroberta_tokenizer_init()
  4151. npos, mtp = self.hparams["n_positions"], self.hparams.get("max_trained_positions", 2048)
  4152. if npos == 8192 and mtp == 2048:
  4153. self.hparams["n_positions"] = 2048 # nomic-embed-text v1 and v1.5 are trained for 2048 tokens.
  4154. elif npos == 2048 and mtp == 2048:
  4155. self.hparams["n_positions"] = 512 # nomic-embed-text-v2-moe is trained for 512 tokens.
  4156. else:
  4157. raise ValueError(f"unrecognized parameters: n_positions={npos}, max_trained_positions={mtp}")
  4158. assert self.hparams["activation_function"] == "gelu" if self.is_moe else "swiglu"
  4159. # this doesn't do anything in the HF version
  4160. assert self.hparams["causal"] is False
  4161. # no bias tensors unless MoE
  4162. assert self.hparams["qkv_proj_bias"] == self.is_moe
  4163. assert self.hparams["mlp_fc1_bias"] == self.is_moe
  4164. assert self.hparams["mlp_fc2_bias"] == self.is_moe
  4165. # norm at end of layer
  4166. assert self.hparams["prenorm"] is False
  4167. # standard RoPE
  4168. assert self.hparams["rotary_emb_fraction"] == 1.0
  4169. assert self.hparams["rotary_emb_interleaved"] is False
  4170. assert self.hparams["rotary_emb_scale_base"] is None
  4171. def set_vocab(self) -> None:
  4172. if self._tokenizer_is_xlmroberta:
  4173. return self._xlmroberta_set_vocab()
  4174. return super().set_vocab()
  4175. def modify_tensors(self, data_torch: torch.Tensor, name: str, bid: int | None) -> Iterable[tuple[str, torch.Tensor]]:
  4176. # If the tensor is an experts bias tensor, skip it by returning an empty list.
  4177. if "mlp.experts.bias" in name:
  4178. return [] # Explicitly return an empty list.
  4179. if "mlp.experts.mlp.w1" in name:
  4180. data_torch = data_torch.view(self.hparams["num_experts"], self.hparams["n_inner"], self.hparams["n_embd"])
  4181. name += ".weight"
  4182. if "mlp.experts.mlp.w2" in name:
  4183. data_torch = data_torch.view(self.hparams["num_experts"], self.hparams["n_inner"], self.hparams["n_embd"])
  4184. data_torch = data_torch.transpose(1, 2)
  4185. name += ".weight"
  4186. return [(self.map_tensor_name(name), data_torch)]
  4187. def set_gguf_parameters(self):
  4188. super().set_gguf_parameters()
  4189. self.gguf_writer.add_rope_freq_base(self.hparams["rotary_emb_base"])
  4190. if self.is_moe:
  4191. self.gguf_writer.add_moe_every_n_layers(self.hparams["moe_every_n_layers"])
  4192. self.gguf_writer.add_expert_count(self.hparams["num_experts"])
  4193. self.gguf_writer.add_expert_used_count(self.hparams["moe_top_k"])
  4194. def _is_tokenizer_xlmroberta(self) -> bool:
  4195. with open(self.dir_model / "tokenizer.json") as f:
  4196. tokenizer_json = json.load(f)
  4197. toktyp = tokenizer_json["model"]["type"]
  4198. if toktyp == "Unigram":
  4199. return True
  4200. if toktyp == "WordPiece":
  4201. return False
  4202. raise ValueError(f"unknown tokenizer: {toktyp}")
  4203. @ModelBase.register("NeoBERT", "NeoBERTLMHead", "NeoBERTForSequenceClassification")
  4204. class NeoBert(BertModel):
  4205. model_arch = gguf.MODEL_ARCH.NEO_BERT
  4206. def set_gguf_parameters(self):
  4207. super().set_gguf_parameters()
  4208. # NeoBERT uses 2/3 of the intermediate size as feed forward length
  4209. self.gguf_writer.add_feed_forward_length(int(2 * self.hparams["intermediate_size"] / 3))
  4210. self.gguf_writer.add_rope_freq_base(10000.0) # default value for NeoBERT
  4211. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)
  4212. f_rms_eps = self.hparams.get("norm_eps", 1e-6) # default value for NeoBERT
  4213. self.gguf_writer.add_layer_norm_rms_eps(f_rms_eps)
  4214. logger.info(f"gguf: rms norm epsilon = {f_rms_eps}")
  4215. self.gguf_writer.add_pooling_type(gguf.PoolingType.CLS) # https://huggingface.co/chandar-lab/NeoBERT#how-to-use
  4216. def modify_tensors(self, data_torch, name, bid):
  4217. if name.startswith("decoder."):
  4218. return []
  4219. if name.startswith("model."):
  4220. name = name[6:]
  4221. return super().modify_tensors(data_torch, name, bid)
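

# jina-embeddings-v3 ships task-specific LoRA adapters stacked inside the base
# XLM-RoBERTa checkpoint ("lora_adaptations" in config.json). The subclass
# below writes the base model as usual and, in addition, splits each stacked
# lora_A/lora_B slice out into its own GGUF adapter file, one per task name.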
  4222. @ModelBase.register("XLMRobertaModel", "XLMRobertaForSequenceClassification")
  4223. class XLMRobertaModel(BertModel):
  4224. model_arch = gguf.MODEL_ARCH.BERT
  4225. _lora_files = {}
  4226. _lora_names = []
  4227. def __init__(self, dir_model: Path, ftype: gguf.LlamaFileType, fname_out: Path, **kwargs: Any):
  4228. hparams = kwargs.pop("hparams", None)
  4229. if hparams is None:
  4230. hparams = ModelBase.load_hparams(dir_model, False)
  4231. if lora_names := hparams.get("lora_adaptations"):
  4232. self._lora_names = lora_names
  4233. self.model_arch = gguf.MODEL_ARCH.JINA_BERT_V3
  4234. super().__init__(dir_model, ftype, fname_out, hparams=hparams, **kwargs)
  4235. self._xlmroberta_tokenizer_init()
  4236. def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
  4237. if self._lora_names:
  4238. for name in self._lora_names:
  4239. fname = self.add_prefix_to_filename(self.fname_out, f"lora-{name}-")
  4240. self._lora_files[name] = gguf.GGUFWriter(fname, arch=gguf.MODEL_ARCH_NAMES[self.model_arch], endianess=self.endianess, use_temp_file=self.use_temp_file, dry_run=self.dry_run)
  4241. return super().generate_extra_tensors()
  4242. def set_type(self):
  4243. for lora_writer in self._lora_files.values():
  4244. lora_writer.add_type(gguf.GGUFType.ADAPTER)
  4245. lora_writer.add_string(gguf.Keys.Adapter.TYPE, "lora")
  4246. super().set_type()
  4247. def set_vocab(self):
  4248. self._xlmroberta_set_vocab()
  4249. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  4250. # if name starts with "roberta.", remove the prefix
  4251. # e.g. https://huggingface.co/BAAI/bge-reranker-v2-m3/tree/main
  4252. if name.startswith("roberta."):
  4253. name = name[8:]
  4254. # jina-embeddings-v3
  4255. if ".parametrizations." in name:
  4256. name = name.replace(".parametrizations.", ".")
  4257. if name.endswith(".original"):
  4258. name = name[:-9]
  4259. # position embeddings start at pad_token_id + 1, so just chop down the weight tensor
  4260. if name == "embeddings.position_embeddings.weight":
  4261. if self._position_offset is not None:
  4262. data_torch = data_torch[self._position_offset:,:]
  4263. if name.endswith(".0.lora_A") or name.endswith(".0.lora_B"):
  4264. if name.startswith("pooler.dense"):
  4265. return []
  4266. num_loras = data_torch.size(0)
  4267. assert num_loras == len(self._lora_names)
  4268. # Split out each LoRA in their own GGUF
  4269. for i, lora_writer in enumerate(self._lora_files.values()):
  4270. new_name = self.map_tensor_name(name[:-9]) + name[-7:].lower()
  4271. data = data_torch[i, :, :]
  4272. # Transpose/flip token_embd/types into correct shape
  4273. if new_name == "token_embd.weight.lora_b":
  4274. data = data.T
  4275. elif new_name.startswith("token_types.weight."):
  4276. new_name = new_name[:-1] + ("a" if new_name[-1:] == "b" else "b")
  4277. lora_writer.add_tensor(new_name, data.float().numpy(), raw_dtype=gguf.GGMLQuantizationType.F32)
  4278. return []
  4279. return super().modify_tensors(data_torch, name, bid)
  4280. def set_gguf_parameters(self):
  4281. super().set_gguf_parameters()
  4282. # jina-embeddings-v3
  4283. if rotary_emb_base := self.hparams.get("rotary_emb_base"):
  4284. self.gguf_writer.add_rope_freq_base(rotary_emb_base)
  4285. lora_alpha = self.hparams.get("lora_alpha")
  4286. if lora_prompt_prefixes := self.hparams.get("task_instructions"):
  4287. assert self._lora_files and all(lora_name in lora_prompt_prefixes for lora_name in self._lora_files.keys())
  4288. for lora_name, lora_writer in self._lora_files.items():
  4289. lora_writer.add_float32(gguf.Keys.Adapter.LORA_ALPHA, lora_alpha if lora_alpha is not None else 1.0)
  4290. lora_writer.add_string(gguf.Keys.Adapter.LORA_TASK_NAME, lora_name)
  4291. if lora_prompt_prefixes:
  4292. lora_writer.add_string(gguf.Keys.Adapter.LORA_PROMPT_PREFIX, lora_prompt_prefixes[lora_name])
  4293. def write(self):
  4294. super().write()
  4295. for lora_writer in self._lora_files.values():
  4296. lora_writer.write_header_to_file()
  4297. lora_writer.write_kv_data_to_file()
  4298. lora_writer.write_tensors_to_file(progress=True)
  4299. lora_writer.close()
  4300. @ModelBase.register("GemmaForCausalLM")
  4301. class GemmaModel(TextModel):
  4302. model_arch = gguf.MODEL_ARCH.GEMMA
  4303. def set_vocab(self):
  4304. self._set_vocab_sentencepiece()
  4305. # TODO: these special tokens should be exported only for the CodeGemma family
  4306. special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=False,
  4307. special_token_types = ['prefix', 'suffix', 'middle', 'fsep', 'eot'])
  4308. special_vocab._set_special_token("prefix", 67)
  4309. special_vocab._set_special_token("suffix", 69)
  4310. special_vocab._set_special_token("middle", 68)
  4311. special_vocab._set_special_token("fsep", 70)
  4312. special_vocab._set_special_token("eot", 107)
  4313. special_vocab.chat_template = None # do not add it twice
  4314. special_vocab.add_to_gguf(self.gguf_writer)
  4315. self.gguf_writer.add_add_space_prefix(False)
  4316. def set_gguf_parameters(self):
  4317. hparams = self.hparams
  4318. block_count = hparams["num_hidden_layers"]
  4319. self.gguf_writer.add_context_length(hparams["max_position_embeddings"])
  4320. self.gguf_writer.add_embedding_length(hparams["hidden_size"])
  4321. self.gguf_writer.add_block_count(block_count)
  4322. self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
  4323. self.gguf_writer.add_head_count(hparams["num_attention_heads"])
  4324. self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"] if "num_key_value_heads" in hparams else hparams["num_attention_heads"])
  4325. self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
  4326. self.gguf_writer.add_key_length(hparams["head_dim"])
  4327. self.gguf_writer.add_value_length(hparams["head_dim"])
  4328. self.gguf_writer.add_file_type(self.ftype)
  4329. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  4330. del bid # unused
  4331. # lm_head is not used in llama.cpp, while autoawq will include this tensor in model
  4332. # To prevent errors, skip loading lm_head.weight.
  4333. if name == "lm_head.weight":
  4334. logger.debug(f"Skipping get tensor {name!r} in safetensors so that convert can end normally.")
  4335. return []
  4336. # ref: https://github.com/huggingface/transformers/blob/fc37f38915372c15992b540dfcbbe00a916d4fc6/src/transformers/models/gemma/modeling_gemma.py#L89
  4337. if name.endswith("norm.weight"):
  4338. data_torch = data_torch + 1
  4339. return [(self.map_tensor_name(name), data_torch)]
  4340. @ModelBase.register("Gemma2ForCausalLM")
  4341. class Gemma2Model(TextModel):
  4342. model_arch = gguf.MODEL_ARCH.GEMMA2
  4343. def set_vocab(self):
  4344. self._set_vocab_sentencepiece()
  4345. self.gguf_writer.add_add_space_prefix(False)
  4346. def set_gguf_parameters(self):
  4347. hparams = self.hparams
  4348. block_count = hparams["num_hidden_layers"]
  4349. self.gguf_writer.add_context_length(hparams["max_position_embeddings"])
  4350. self.gguf_writer.add_embedding_length(hparams["hidden_size"])
  4351. self.gguf_writer.add_block_count(block_count)
  4352. self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
  4353. self.gguf_writer.add_head_count(hparams["num_attention_heads"])
  4354. self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"] if "num_key_value_heads" in hparams else hparams["num_attention_heads"])
  4355. self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
  4356. self.gguf_writer.add_key_length(hparams["head_dim"])
  4357. self.gguf_writer.add_value_length(hparams["head_dim"])
  4358. self.gguf_writer.add_file_type(self.ftype)
  4359. self.gguf_writer.add_attn_logit_softcapping(
  4360. self.hparams["attn_logit_softcapping"]
  4361. )
  4362. self.gguf_writer.add_final_logit_softcapping(
  4363. self.hparams["final_logit_softcapping"]
  4364. )
  4365. self.gguf_writer.add_sliding_window(self.hparams["sliding_window"])
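
    # Gemma 2 bounds attention and final logits with a soft cap,
    # softcap(x) = cap * tanh(x / cap), so the two *_softcapping values above
    # are required hyperparameters rather than optional tuning knobs.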
    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        # lm_head is not used in llama.cpp, while autoawq will include this tensor in model
        # To prevent errors, skip loading lm_head.weight.
        if name == "lm_head.weight":
            logger.debug(f"Skipping get tensor {name!r} in safetensors so that convert can end normally.")
            return []

        # ref: https://github.com/huggingface/transformers/blob/fc37f38915372c15992b540dfcbbe00a916d4fc6/src/transformers/models/gemma/modeling_gemma.py#L89
        if name.endswith("norm.weight"):
            data_torch = data_torch + 1

        return [(self.map_tensor_name(name), data_torch)]
  4377. @ModelBase.register("Gemma3ForCausalLM", "Gemma3ForConditionalGeneration")
  4378. class Gemma3Model(TextModel):
  4379. model_arch = gguf.MODEL_ARCH.GEMMA3
  4380. norm_shift = 1.0 # Gemma3RMSNorm adds 1.0 to the norm value
  4381. def set_vocab(self):
  4382. self._set_vocab_sentencepiece()
  4383. self.gguf_writer.add_add_space_prefix(False)
  4384. def set_gguf_parameters(self):
  4385. hparams = self.hparams
  4386. block_count = hparams["num_hidden_layers"]
  4387. # some default values are not specified in the hparams
  4388. self.gguf_writer.add_context_length(hparams.get("max_position_embeddings", 131072))
  4389. self.gguf_writer.add_embedding_length(hparams["hidden_size"])
  4390. self.gguf_writer.add_block_count(block_count)
  4391. self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
  4392. self.gguf_writer.add_head_count(hparams.get("num_attention_heads", 8))
  4393. self.gguf_writer.add_layer_norm_rms_eps(self.hparams.get("rms_norm_eps", 1e-6))
  4394. self.gguf_writer.add_key_length(hparams.get("head_dim", 256))
  4395. self.gguf_writer.add_value_length(hparams.get("head_dim", 256))
  4396. self.gguf_writer.add_file_type(self.ftype)
  4397. self.gguf_writer.add_rope_freq_base(hparams.get("rope_theta", 1_000_000.0)) # for global layers
  4398. # attn_logit_softcapping is removed in Gemma3
  4399. assert hparams.get("attn_logit_softcapping") is None
  4400. self.gguf_writer.add_sliding_window(hparams["sliding_window"])
  4401. self.gguf_writer.add_head_count_kv(hparams.get("num_key_value_heads", 4))
  4402. if hparams.get("rope_scaling") is not None:
  4403. assert hparams["rope_scaling"]["rope_type"] == "linear"
  4404. # important: this rope_scaling is only applied for global layers, and not used by 1B model
  4405. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
  4406. self.gguf_writer.add_rope_scaling_factor(hparams["rope_scaling"]["factor"])
  4407. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  4408. del bid # unused
  4409. if "language_model." in name:
  4410. name = name.replace("language_model.", "")
  4411. elif name.startswith("multi_modal_projector.") or name.startswith("vision_tower.") \
  4412. or name.startswith("multimodal_projector.") or name.startswith("vision_model."):
  4413. return [] # skip vision tensors
  4414. # remove OOV (out-of-vocabulary) rows in token_embd
  4415. if "embed_tokens.weight" in name:
  4416. vocab = self._create_vocab_sentencepiece()
  4417. tokens = vocab[0]
  4418. data_torch = data_torch[:len(tokens)]
  4419. # ref code in Gemma3RMSNorm
  4420. # output = output * (1.0 + self.weight.float())
  4421. # note: this is not the case on gemma3n
  4422. if name.endswith("norm.weight"):
  4423. data_torch = data_torch + self.norm_shift
  4424. return [(self.map_tensor_name(name), data_torch)]
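

# EmbeddingGemma is a sentence-transformers model built on the Gemma 3 text
# stack. Besides the transformer weights it may ship extra Dense projection
# modules (listed in modules.json, typically "2_Dense" and "3_Dense"); these
# are exported as additional dense_2/dense_3 tensors below so the runtime can
# reproduce the full embedding pipeline.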
  4425. @ModelBase.register("Gemma3TextModel")
  4426. class EmbeddingGemma(Gemma3Model):
  4427. model_arch = gguf.MODEL_ARCH.GEMMA_EMBEDDING
  4428. module_paths = []
  4429. dense_features_dims = {}
  4430. def __init__(self, *args, **kwargs):
  4431. super().__init__(*args, **kwargs)
  4432. if self.sentence_transformers_dense_modules:
  4433. # read modules.json to determine if model has Dense layers
  4434. modules_file = self.dir_model / "modules.json"
  4435. if modules_file.is_file():
  4436. with open(modules_file, encoding="utf-8") as modules_json_file:
  4437. mods = json.load(modules_json_file)
  4438. for mod in mods:
  4439. if mod["type"] == "sentence_transformers.models.Dense":
  4440. mod_path = mod["path"]
  4441. # check if model.safetensors file for Dense layer exists
  4442. model_tensors_file = self.dir_model / mod_path / "model.safetensors"
  4443. if model_tensors_file.is_file():
  4444. self.module_paths.append(mod_path)
  4445. # read config.json of the Dense layer to get in/out features
  4446. mod_conf_file = self.dir_model / mod_path / "config.json"
  4447. if mod_conf_file.is_file():
  4448. with open(mod_conf_file, encoding="utf-8") as mod_conf_json_file:
  4449. mod_conf = json.load(mod_conf_json_file)
  4450. # hparams dense_2_feat_out and dense_3_feat_in are required when loading model's dense weights
  4451. prefix = self._get_dense_prefix(mod_path)
  4452. if mod_conf["in_features"] is not None and mod_conf["out_features"] is not None:
  4453. self.dense_features_dims[prefix] = (mod_conf["in_features"], mod_conf["out_features"])
  4454. def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
  4455. from safetensors.torch import load_file
  4456. module_paths = list(self.module_paths)
  4457. for i, module_path in enumerate(module_paths):
  4458. tensors_file = self.dir_model / module_path / "model.safetensors"
  4459. local_tensors = load_file(tensors_file)
  4460. tensor_name = self._get_dense_prefix(module_path)
  4461. for name, local_tensor in local_tensors.items():
  4462. if not name.endswith(".weight"):
  4463. continue
  4464. orig_name = name.replace("linear", tensor_name)
  4465. name = self.map_tensor_name(orig_name)
  4466. yield name, local_tensor.clone()
  4467. @staticmethod
  4468. def _get_dense_prefix(module_path) -> str:
  4469. """Get the tensor name prefix for the Dense layer from module path."""
  4470. tensor_name = "dense_2" if module_path == "2_Dense" else "dense_3"
  4471. return tensor_name
  4472. def set_gguf_parameters(self):
  4473. super().set_gguf_parameters()
  4474. # Override the sliding window size as it gets adjusted by the Gemma3TextConfig
  4475. # constructor. We want to use the value from the original model's config.json.
  4476. # ref: https://github.com/huggingface/transformers/pull/40700
  4477. with open(self.dir_model / "config.json", "r", encoding="utf-8") as f:
  4478. config = json.load(f)
  4479. orig_sliding_window = config.get("sliding_window")
  4480. if orig_sliding_window is None:
  4481. raise ValueError("sliding_window not found in model config - this is required for the model")
  4482. logger.info(f"Using original sliding_window from config: {orig_sliding_window} "
  4483. f"instead of {self.hparams['sliding_window']}")
  4484. self.gguf_writer.add_sliding_window(orig_sliding_window)
  4485. if self.sentence_transformers_dense_modules:
  4486. for dense, dims in self.dense_features_dims.items():
  4487. logger.info(f"Setting dense layer {dense} in/out features to {dims}")
  4488. self.gguf_writer.add_dense_features_dims(dense, dims[0], dims[1])
  4489. self._try_set_pooling_type()
  4490. @ModelBase.register("Gemma3ForConditionalGeneration")
  4491. class Gemma3VisionModel(MmprojModel):
  4492. def set_gguf_parameters(self):
  4493. super().set_gguf_parameters()
  4494. hparams = self.hparams
  4495. self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.GEMMA3)
  4496. # default values below are taken from HF tranformers code
  4497. self.gguf_writer.add_vision_attention_layernorm_eps(hparams.get("layer_norm_eps", 1e-6))
  4498. self.gguf_writer.add_vision_use_gelu(True)
  4499. # calculate proj_scale_factor (used by tinygemma3 test model)
  4500. image_seq_length = self.preprocessor_config.get("image_seq_length", 256)
  4501. n_per_side = int(image_seq_length ** 0.5)
  4502. image_size = self.hparams["image_size"]
  4503. patch_size = self.hparams["patch_size"]
  4504. proj_scale_factor = (image_size // patch_size) // n_per_side
  4505. if proj_scale_factor > 0 and proj_scale_factor != 4:
  4506. # we only need to write this if it's not the default value
  4507. # in this case, we are converting a test model
  4508. self.gguf_writer.add_vision_projector_scale_factor(proj_scale_factor)
  4509. def tensor_force_quant(self, name, new_name, bid, n_dims):
  4510. # related to https://github.com/ggml-org/llama.cpp/issues/13025
  4511. if "input_projection" in name:
  4512. return gguf.GGMLQuantizationType.F16
  4513. if ".embeddings." in name:
  4514. return gguf.GGMLQuantizationType.F32
  4515. return super().tensor_force_quant(name, new_name, bid, n_dims)
  4516. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  4517. del bid # unused
  4518. if "vision_model.head." in name:
  4519. return [] # skip redundant tensors for tinygemma3
  4520. if name.startswith("multi_modal_projector.") or name.startswith("vision_tower.") \
  4521. or name.startswith("multimodal_projector.") or name.startswith("vision_model."):
  4522. # process vision tensors
  4523. name = name.replace("_weight", ".weight")
4524. # correct the norm value; only this "soft_emb_norm" needs to be corrected as it's part of the Gemma projector
4525. # the other norm values are part of the SigLIP model, and they are already correct
  4526. # ref code: Gemma3RMSNorm
  4527. if "soft_emb_norm.weight" in name:
  4528. logger.info(f"Correcting norm value for '{name}'")
  4529. data_torch = data_torch + 1
  4530. return [(self.map_tensor_name(name), data_torch)]
  4531. return [] # skip other tensors
  4532. @ModelBase.register("Gemma3nForConditionalGeneration")
  4533. class Gemma3NModel(Gemma3Model):
  4534. model_arch = gguf.MODEL_ARCH.GEMMA3N
4535. norm_shift = 0.0 # same value as the Gemma3p5RMSNorm scale_shift in the Python code
  4536. _altup_proj: list[Tensor] = []
  4537. _altup_unembd: list[Tensor] = []
  4538. def __init__(self, *args, **kwargs):
  4539. super().__init__(*args, **kwargs)
  4540. assert self.hparams["altup_num_inputs"] == 4, "Current conversion only supports 4 altup inputs"
  4541. self._altup_proj = [
  4542. torch.Tensor(), # to be replaced
  4543. torch.Tensor(), # to be replaced
  4544. torch.Tensor(), # to be replaced
  4545. ]
  4546. self._altup_unembd = [
  4547. torch.Tensor(), # to be replaced
  4548. torch.Tensor(), # to be replaced
  4549. torch.Tensor(), # to be replaced
  4550. ]
  4551. def set_vocab(self):
  4552. super().set_vocab()
  4553. def set_gguf_parameters(self):
  4554. super().set_gguf_parameters()
  4555. self.gguf_writer.add_altup_active_idx(self.hparams["altup_active_idx"])
  4556. self.gguf_writer.add_altup_num_inputs(self.hparams["altup_num_inputs"])
  4557. self.gguf_writer.add_embedding_length_per_layer_input(self.hparams["hidden_size_per_layer_input"])
  4558. self.gguf_writer.add_shared_kv_layers(self.hparams["num_kv_shared_layers"])
  4559. activation_sparsity_scale = []
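# the inverse CDF maps each target sparsity fraction to a z-score cutoff on a
# standard normal; e.g. s=0.95 yields a std multiplier of about 1.645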
  4560. for s in self.hparams["activation_sparsity_pattern"]:
  4561. normal_dist = torch.distributions.normal.Normal(0, 1)
  4562. std_multiplier = normal_dist.icdf(torch.tensor(s, dtype=torch.float32))
  4563. activation_sparsity_scale.append(std_multiplier.item())
  4564. self.gguf_writer.add_activation_sparsity_scale(activation_sparsity_scale)
  4565. sliding_window_pattern = []
  4566. for t in self.hparams["layer_types"]:
  4567. sliding_window_pattern.append(t == "sliding_attention")
  4568. self.gguf_writer.add_sliding_window_pattern(sliding_window_pattern)
  4569. def _stack_matrices(self, matrices: list[Tensor]) -> Tensor | None:
  4570. has_all = all(m.numel() > 0 for m in matrices)
  4571. if not has_all:
  4572. return None
  4573. else:
  4574. return torch.stack(matrices, dim=0)
  4575. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  4576. if name.endswith("_scale"):
  4577. name = name + ".weight"
  4578. # TODO: implement self.prediction_coefs.weight.clamp_(...)
  4579. if "language_model." not in name:
  4580. return [] # skip non-language model tensors
  4581. if "altup_unembed_projections" in name:
  4582. data_torch = data_torch.to(device="cpu")
  4583. if ".0." in name:
  4584. self._altup_unembd[0] = data_torch
  4585. elif ".1." in name:
  4586. self._altup_unembd[1] = data_torch
  4587. elif ".2." in name:
  4588. self._altup_unembd[2] = data_torch
  4589. else:
  4590. raise ValueError(f"Unknown name: {name}")
  4591. out = self._stack_matrices(self._altup_unembd)
  4592. if out is not None:
  4593. return [(self.map_tensor_name("model.altup_unembed_projections.weight"), out)]
  4594. else:
  4595. return []
  4596. if "altup_projections" in name:
  4597. data_torch = data_torch.to(device="cpu")
  4598. if ".0." in name:
  4599. self._altup_proj[0] = data_torch
  4600. elif ".1." in name:
  4601. self._altup_proj[1] = data_torch
  4602. elif ".2." in name:
  4603. self._altup_proj[2] = data_torch
  4604. else:
  4605. raise ValueError(f"Unknown name: {name}")
  4606. out = self._stack_matrices(self._altup_proj)
  4607. if out is not None:
  4608. return [(self.map_tensor_name("model.altup_projections.weight"), out)]
  4609. else:
  4610. return []
  4611. return super().modify_tensors(data_torch, name, bid)
  4612. @ModelBase.register("Starcoder2ForCausalLM")
  4613. class StarCoder2Model(TextModel):
  4614. model_arch = gguf.MODEL_ARCH.STARCODER2
  4615. @ModelBase.register("Rwkv6ForCausalLM")
  4616. class Rwkv6Model(TextModel):
  4617. model_arch = gguf.MODEL_ARCH.RWKV6
  4618. def set_vocab(self):
  4619. self._set_vocab_rwkv_world()
  4620. def set_gguf_parameters(self):
  4621. block_count = self.hparams["num_hidden_layers"]
  4622. head_size = self.hparams["head_size"]
  4623. hidden_size = self.hparams["hidden_size"]
  4624. layer_norm_eps = self.hparams["layer_norm_epsilon"]
  4625. rescale_every_n_layers = self.hparams["rescale_every"]
  4626. intermediate_size = self.hparams["intermediate_size"] if self.hparams["intermediate_size"] is not None else int((hidden_size * 3.5) // 32 * 32)
  4627. time_mix_extra_dim = 64 if hidden_size == 4096 else 32
  4628. time_decay_extra_dim = 128 if hidden_size == 4096 else 64
  4629. # RWKV isn't context limited
  4630. self.gguf_writer.add_context_length(1048576)
  4631. self.gguf_writer.add_embedding_length(hidden_size)
  4632. self.gguf_writer.add_block_count(block_count)
  4633. self.gguf_writer.add_layer_norm_eps(layer_norm_eps)
  4634. self.gguf_writer.add_rescale_every_n_layers(rescale_every_n_layers)
  4635. self.gguf_writer.add_wkv_head_size(head_size)
  4636. self.gguf_writer.add_time_mix_extra_dim(time_mix_extra_dim)
  4637. self.gguf_writer.add_time_decay_extra_dim(time_decay_extra_dim)
  4638. self.gguf_writer.add_feed_forward_length(intermediate_size)
  4639. self.gguf_writer.add_file_type(self.ftype)
  4640. # required by llama.cpp, unused
  4641. self.gguf_writer.add_head_count(0)
  4642. lerp_weights: dict[int, dict[str, Tensor]] = {}
  4643. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  4644. new_name = self.map_tensor_name(name)
  4645. if not (new_name.endswith(".weight") or new_name.endswith(".bias")):
  4646. new_name += ".weight"
  4647. if new_name.endswith("time_mix_w1.weight") or new_name.endswith("time_mix_decay_w1.weight") or new_name.endswith("time_mix_decay_w2.weight"):
  4648. data_torch = data_torch.transpose(0, 1)
  4649. if new_name.endswith("time_mix_w2.weight"):
  4650. data_torch = data_torch.permute(0, 2, 1)
  4651. if new_name.endswith("time_mix_decay.weight") or "lerp" in new_name:
  4652. data_torch = data_torch.squeeze()
  4653. try:
  4654. rescale_every_n_layers = self.hparams["rescale_every"]
  4655. if rescale_every_n_layers > 0:
  4656. if new_name.endswith("time_mix_output.weight") or new_name.endswith("channel_mix_value.weight"):
  4657. data_torch = data_torch.div_(2 ** int(bid // rescale_every_n_layers))
  4658. except KeyError:
  4659. pass
4660. # concatenate the time_mix_lerp weights to reduce some CPU overhead
4661. # this also reduces the number of tensors in the model
  4662. if bid is not None and "time_mix_lerp" in new_name and "time_mix_lerp_x" not in new_name:
  4663. try:
  4664. self.lerp_weights[bid][new_name] = data_torch
  4665. except KeyError:
  4666. self.lerp_weights[bid] = {new_name: data_torch}
  4667. if all(f"blk.{bid}.time_mix_lerp_{i}.weight" in self.lerp_weights[bid].keys() for i in ["w", "k", "v", "r", "g"]):
  4668. new_name = f"blk.{bid}.time_mix_lerp_fused.weight"
  4669. data = torch.stack([self.lerp_weights[bid][f"blk.{bid}.time_mix_lerp_{i}.weight"].unsqueeze(0) for i in ["w", "k", "v", "r", "g"]], dim=0).unsqueeze(1)
  4670. yield (new_name, data)
  4671. return
  4672. yield (new_name, data_torch)
  4673. @ModelBase.register("RWKV6Qwen2ForCausalLM")
  4674. class RWKV6Qwen2Model(Rwkv6Model):
  4675. model_arch = gguf.MODEL_ARCH.RWKV6QWEN2
  4676. def set_vocab(self):
  4677. try:
  4678. self._set_vocab_sentencepiece()
  4679. except FileNotFoundError:
  4680. self._set_vocab_gpt2()
  4681. def set_gguf_parameters(self):
  4682. block_count = self.hparams["num_hidden_layers"]
  4683. num_attention_heads = self.hparams["num_attention_heads"]
  4684. num_key_value_heads = self.hparams["num_key_value_heads"]
  4685. hidden_size = self.hparams["hidden_size"]
  4686. head_size = hidden_size // num_attention_heads
  4687. rms_norm_eps = self.hparams["rms_norm_eps"]
  4688. intermediate_size = self.hparams["intermediate_size"]
  4689. time_mix_extra_dim = self.hparams.get("lora_rank_tokenshift", 64 if hidden_size >= 4096 else 32)
  4690. time_decay_extra_dim = self.hparams.get("lora_rank_decay", 128 if hidden_size >= 4096 else 64)
  4691. # RWKV isn't context limited
  4692. self.gguf_writer.add_context_length(1048576)
  4693. self.gguf_writer.add_embedding_length(hidden_size)
  4694. self.gguf_writer.add_block_count(block_count)
  4695. self.gguf_writer.add_wkv_head_size(head_size)
  4696. self.gguf_writer.add_time_mix_extra_dim(time_mix_extra_dim)
  4697. self.gguf_writer.add_time_decay_extra_dim(time_decay_extra_dim)
  4698. self.gguf_writer.add_feed_forward_length(intermediate_size)
  4699. self.gguf_writer.add_file_type(self.ftype)
  4700. # special parameters for time_mixing in RWKV6QWEN2
  4701. self.gguf_writer.add_layer_norm_rms_eps(rms_norm_eps)
  4702. self.gguf_writer.add_token_shift_count(1)
4703. # RWKV6QWEN2 uses grouped key/value heads (GQA)
  4704. self.gguf_writer.add_head_count_kv(num_key_value_heads)
  4705. # required by llama.cpp, unused
  4706. self.gguf_writer.add_head_count(0)
  4707. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  4708. for new_name, data in super().modify_tensors(data_torch, name, bid):
  4709. if "time_mix_w1" in new_name or "time_mix_w2" in new_name:
  4710. data = data.view(5, -1, data.shape[-1])
4711. # rwkv6qwen2 stacks the lora weights in rkvwg order instead of the original wkvrg
4712. # permute them back to wkvrg here to avoid code changes elsewhere
  4713. data = torch.stack([data[3], data[1], data[2], data[0], data[4]], dim=0).view(-1, data.shape[-1])
  4714. if "w2" in new_name:
  4715. data = data.view(5, -1, data.shape[-1])
  4716. yield (new_name, data)
  4717. continue
  4718. yield (new_name, data)
  4719. @ModelBase.register("Rwkv7ForCausalLM", "RWKV7ForCausalLM")
  4720. class Rwkv7Model(TextModel):
  4721. model_arch = gguf.MODEL_ARCH.RWKV7
  4722. def set_vocab(self):
  4723. self._set_vocab_rwkv_world()
  4724. def calc_lora_rank(self, hidden_size, exponent, multiplier):
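# rounds to a positive multiple of 32; e.g. hidden_size=2048, exponent=0.5, multiplier=1.8
# (illustrative values) gives round(2048 ** 0.5 * 1.8 / 32) * 32 = 3 * 32 = 96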
  4725. return max(1, round(hidden_size ** exponent * multiplier / 32)) * 32
  4726. def set_gguf_parameters(self):
  4727. block_count = self.hparams["num_hidden_layers"]
  4728. try:
  4729. head_size = self.hparams["head_size"]
  4730. layer_norm_eps = self.hparams["layer_norm_epsilon"]
  4731. except KeyError:
  4732. head_size = self.hparams["head_dim"]
  4733. layer_norm_eps = self.hparams["norm_eps"]
  4734. hidden_size = self.hparams["hidden_size"]
  4735. intermediate_size = self.hparams["intermediate_size"] if self.hparams["intermediate_size"] is not None else (hidden_size * 4)
  4736. # ICLR: In-Context-Learning-Rate
  4737. try:
  4738. lora_rank_decay = self.hparams["lora_rank_decay"] if self.hparams["lora_rank_decay"] is not None else self.calc_lora_rank(hidden_size, 0.5, 1.8)
  4739. lora_rank_iclr = self.hparams["lora_rank_iclr"] if self.hparams["lora_rank_iclr"] is not None else self.calc_lora_rank(hidden_size, 0.5, 1.8)
  4740. lora_rank_value_residual_mix = self.hparams["lora_rank_value_residual_mix"] if self.hparams["lora_rank_value_residual_mix"] is not None else self.calc_lora_rank(hidden_size, 0.5, 1.3)
  4741. lora_rank_gate = self.hparams["lora_rank_gate"] if self.hparams["lora_rank_gate"] is not None else self.calc_lora_rank(hidden_size, 0.8, 0.6)
  4742. except KeyError:
  4743. lora_rank_decay = self.hparams["decay_low_rank_dim"] if self.hparams["decay_low_rank_dim"] is not None else self.calc_lora_rank(hidden_size, 0.5, 1.8)
  4744. lora_rank_iclr = self.hparams["a_low_rank_dim"] if self.hparams["a_low_rank_dim"] is not None else self.calc_lora_rank(hidden_size, 0.5, 1.8)
  4745. lora_rank_value_residual_mix = self.hparams["v_low_rank_dim"] if self.hparams["v_low_rank_dim"] is not None else self.calc_lora_rank(hidden_size, 0.5, 1.3)
  4746. lora_rank_gate = self.hparams["gate_low_rank_dim"] if self.hparams["gate_low_rank_dim"] is not None else self.calc_lora_rank(hidden_size, 0.8, 0.6)
  4747. # RWKV isn't context limited
  4748. self.gguf_writer.add_context_length(1048576)
  4749. self.gguf_writer.add_embedding_length(hidden_size)
  4750. self.gguf_writer.add_block_count(block_count)
  4751. self.gguf_writer.add_layer_norm_eps(layer_norm_eps)
  4752. self.gguf_writer.add_wkv_head_size(head_size)
  4753. self.gguf_writer.add_decay_lora_rank(lora_rank_decay)
  4754. self.gguf_writer.add_iclr_lora_rank(lora_rank_iclr)
  4755. self.gguf_writer.add_value_residual_mix_lora_rank(lora_rank_value_residual_mix)
  4756. self.gguf_writer.add_gate_lora_rank(lora_rank_gate)
  4757. self.gguf_writer.add_feed_forward_length(intermediate_size)
  4758. self.gguf_writer.add_file_type(self.ftype)
  4759. # required by llama.cpp, unused
  4760. self.gguf_writer.add_head_count(0)
  4761. lerp_weights: dict[int, dict[str, Tensor]] = {}
  4762. lora_needs_transpose: bool = True
  4763. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  4764. # unify tensor names here to make life easier
  4765. name = name.replace("blocks", "layers").replace("ffn", "feed_forward")
  4766. name = name.replace("self_attn", "attention").replace("attn", "attention")
  4767. name = name.replace("time_mixer.", "")
  4768. # lora layer names in fla-hub's impl
  4769. if "_lora.lora" in name:
  4770. self.lora_needs_transpose = False
  4771. name = name.replace("_lora.lora.0.weight", "1.weight")
  4772. name = name.replace("_lora.lora.2.weight", "2.weight")
  4773. name = name.replace("_lora.lora.2.bias", "0.weight")
  4774. name = name.replace("feed_forward_norm", "ln2")
  4775. name = name.replace("g_norm", "ln_x")
  4776. if "attention.v" in name and "value" not in self.map_tensor_name(name) and bid == 0:
  4777. # some models have dummy v0/v1/v2 on first layer while others don't
  4778. # ignore them all since they are not used
  4779. return
  4780. wkv_has_gate = self.hparams.get("wkv_has_gate", True)
  4781. lerp_list = ["r", "w", "k", "v", "a", "g"] if wkv_has_gate else ["r", "w", "k", "v", "a"]
  4782. if bid is not None and "attention.x_" in name:
  4783. if "attention.x_x" in name:
  4784. # already concatenated
  4785. new_name = f"blk.{bid}.time_mix_lerp_fused.weight"
  4786. data = data_torch.reshape(len(lerp_list), 1, 1, -1)
  4787. yield (new_name, data)
  4788. else:
  4789. try:
  4790. self.lerp_weights[bid][name] = data_torch
  4791. except KeyError:
  4792. self.lerp_weights[bid] = {name: data_torch}
  4793. if all(f"model.layers.{bid}.attention.x_{i}" in self.lerp_weights[bid].keys() for i in lerp_list):
  4794. new_name = f"blk.{bid}.time_mix_lerp_fused.weight"
  4795. data = torch.stack([self.lerp_weights[bid][f"model.layers.{bid}.attention.x_{i}"] for i in lerp_list], dim=0)
  4796. yield (new_name, data)
  4797. return
  4798. else:
  4799. data_torch = data_torch.squeeze()
  4800. new_name = self.map_tensor_name(name)
  4801. if not (new_name.endswith(".weight") or new_name.endswith(".bias")):
  4802. new_name += ".weight"
  4803. if self.lora_needs_transpose and any(
  4804. new_name.endswith(t) for t in [
  4805. "time_mix_w1.weight", "time_mix_w2.weight",
  4806. "time_mix_a1.weight", "time_mix_a2.weight",
  4807. "time_mix_v1.weight", "time_mix_v2.weight",
  4808. "time_mix_g1.weight", "time_mix_g2.weight",
  4809. ]
  4810. ):
  4811. data_torch = data_torch.transpose(0, 1)
  4812. if 'r_k' in new_name:
  4813. data_torch = data_torch.flatten()
  4814. if bid == 0 and "time_mix_a" in new_name:
4815. # emit dummy v0/v1/v2 tensors on the first layer
4816. # easiest way to make llama.cpp happy
  4817. yield (new_name.replace("time_mix_a", "time_mix_v"), data_torch)
  4818. yield (new_name, data_torch)
  4819. @ModelBase.register("RwkvHybridForCausalLM")
  4820. class ARwkv7Model(Rwkv7Model):
  4821. model_arch = gguf.MODEL_ARCH.ARWKV7
  4822. def set_vocab(self):
  4823. try:
  4824. self._set_vocab_sentencepiece()
  4825. except FileNotFoundError:
  4826. self._set_vocab_gpt2()
  4827. def set_gguf_parameters(self):
  4828. block_count = self.hparams["num_hidden_layers"]
  4829. hidden_size = self.hparams["hidden_size"]
  4830. head_size = self.hparams["head_size"]
  4831. rms_norm_eps = self.hparams["rms_norm_eps"]
  4832. intermediate_size = self.hparams["intermediate_size"]
  4833. wkv_has_gate = self.hparams["wkv_has_gate"]
  4834. assert self.hparams["wkv_version"] == 7
  4835. # ICLR: In-Context-Learning-Rate
  4836. lora_rank_decay = 64
  4837. lora_rank_iclr = 64
  4838. lora_rank_value_residual_mix = 32
  4839. lora_rank_gate = 128 if wkv_has_gate else 0
  4840. # RWKV isn't context limited
  4841. self.gguf_writer.add_context_length(1048576)
  4842. self.gguf_writer.add_embedding_length(hidden_size)
  4843. self.gguf_writer.add_block_count(block_count)
  4844. self.gguf_writer.add_layer_norm_rms_eps(rms_norm_eps)
  4845. self.gguf_writer.add_wkv_head_size(head_size)
  4846. self.gguf_writer.add_decay_lora_rank(lora_rank_decay)
  4847. self.gguf_writer.add_iclr_lora_rank(lora_rank_iclr)
  4848. self.gguf_writer.add_value_residual_mix_lora_rank(lora_rank_value_residual_mix)
  4849. self.gguf_writer.add_gate_lora_rank(lora_rank_gate)
  4850. self.gguf_writer.add_feed_forward_length(intermediate_size)
  4851. self.gguf_writer.add_file_type(self.ftype)
  4852. self.gguf_writer.add_token_shift_count(1)
  4853. # required by llama.cpp, unused
  4854. self.gguf_writer.add_head_count(0)
  4855. @ModelBase.register("MambaForCausalLM", "MambaLMHeadModel", "FalconMambaForCausalLM")
  4856. class MambaModel(TextModel):
  4857. model_arch = gguf.MODEL_ARCH.MAMBA
  4858. def __init__(self, dir_model: Path, *args, **kwargs):
  4859. # Avoid using AutoConfig for hparams
  4860. hparams = kwargs.pop("hparams", None)
  4861. if hparams is None:
  4862. with open(dir_model / "config.json", "r", encoding="utf-8") as f:
  4863. hparams = json.load(f)
  4864. super().__init__(dir_model, *args, hparams=hparams, **kwargs)
  4865. def set_vocab(self):
  4866. vocab_size = self.hparams["vocab_size"]
4867. # Round the vocab size up to the next multiple of pad_vocab_size_multiple (default 8)
  4868. pad_vocab = self.hparams.get("pad_vocab_size_multiple", 8)
  4869. # pad using ceiling division
  4870. # ref: https://stackoverflow.com/a/17511341/22827863
  4871. vocab_size = -(vocab_size // -pad_vocab) * pad_vocab
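# e.g. vocab_size=50254 with pad_vocab=8 (illustrative values) gives
# -(50254 // -8) * 8 = 6282 * 8 = 50256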
  4872. self.hparams["vocab_size"] = vocab_size
  4873. if (self.dir_model / "tokenizer.json").is_file():
  4874. self._set_vocab_gpt2()
  4875. elif (self.dir_model / "tokenizer.model").is_file():
  4876. self._set_vocab_sentencepiece()
  4877. else:
  4878. # Use the GPT-NeoX tokenizer when no tokenizer files are present
  4879. self._set_vocab_builtin("gpt-neox", vocab_size)
  4880. def set_gguf_parameters(self):
  4881. d_model = self.find_hparam(["hidden_size", "d_model"])
  4882. d_conv = self.find_hparam(["conv_kernel", "d_conv"], optional=True) or 4
  4883. d_inner = self.find_hparam(["intermediate_size", "d_inner"], optional=True) or 2 * d_model
  4884. d_state = self.find_hparam(["state_size", "d_state"], optional=True) or 16
  4885. # ceiling division
  4886. # ref: https://stackoverflow.com/a/17511341/22827863
  4887. # ref: https://github.com/state-spaces/mamba/blob/ce59daea3a090d011d6476c6e5b97f6d58ddad8b/mamba_ssm/modules/mamba_simple.py#L58
  4888. dt_rank = self.find_hparam(["time_step_rank", "dt_rank"], optional=True) or -(d_model // -16)
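# e.g. d_model=2560 (illustrative value) gives dt_rank = -(2560 // -16) = 160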
  4889. rms_norm_eps = self.find_hparam(["layer_norm_epsilon", "rms_norm_eps"], optional=True) or 1e-5
  4890. use_dt_b_c_norm = False
4891. # For FalconMamba we do apply RMS norm on the B, DT and C layers
  4892. if self.find_hparam(["model_type"], optional=True) in ("falcon_mamba",):
  4893. use_dt_b_c_norm = True
  4894. # Fail early for models which don't have a block expansion factor of 2
  4895. assert d_inner == 2 * d_model
  4896. self.gguf_writer.add_context_length(2**20) # arbitrary value; for those who use the default
  4897. self.gguf_writer.add_embedding_length(d_model)
  4898. self.gguf_writer.add_feed_forward_length(0) # unused, but seemingly required when loading
  4899. self.gguf_writer.add_head_count(0) # unused, but seemingly required when loading
  4900. self.gguf_writer.add_block_count(self.block_count)
  4901. self.gguf_writer.add_ssm_conv_kernel(d_conv)
  4902. self.gguf_writer.add_ssm_inner_size(d_inner)
  4903. self.gguf_writer.add_ssm_state_size(d_state)
  4904. self.gguf_writer.add_ssm_time_step_rank(dt_rank)
  4905. self.gguf_writer.add_layer_norm_rms_eps(rms_norm_eps)
4906. self.gguf_writer.add_ssm_dt_b_c_rms(use_dt_b_c_norm) # For classic Mamba we don't apply RMS norm on the B / DT layers
  4907. self.gguf_writer.add_file_type(self.ftype)
  4908. _tok_embd = None
  4909. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  4910. output_name = self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT)
  4911. tok_embd_name = self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD)
  4912. new_name = self.map_tensor_name(name)
  4913. if name.endswith(".A_log"):
  4914. logger.debug("A_log --> A ==> " + new_name)
  4915. data_torch = -torch.exp(data_torch)
  4916. # [4 1 8192 1] -> [4 8192 1 1]
  4917. if self.match_model_tensor_name(new_name, gguf.MODEL_TENSOR.SSM_CONV1D, bid):
  4918. data_torch = data_torch.squeeze()
  4919. # assuming token_embd.weight is seen before output.weight
  4920. if self._tok_embd is not None and new_name == output_name:
  4921. if torch.equal(self._tok_embd, data_torch):
  4922. logger.debug(f"{output_name} is equivalent to {tok_embd_name}, omitting")
  4923. return []
  4924. elif new_name == tok_embd_name:
  4925. self._tok_embd = data_torch
  4926. return [(new_name, data_torch)]
  4927. @ModelBase.register("Mamba2ForCausalLM")
  4928. class Mamba2Model(TextModel):
  4929. model_arch = gguf.MODEL_ARCH.MAMBA2
  4930. def __init__(self, dir_model: Path, *args, **kwargs):
  4931. # Avoid using AutoConfig for hparams
  4932. # It wrongly assumes all Mamba2 models are Mamba-Codestral-7B-v0.1
  4933. hparams = kwargs.pop("hparams", None)
  4934. if hparams is None:
  4935. with open(dir_model / "config.json", "r", encoding="utf-8") as f:
  4936. hparams = json.load(f)
  4937. super().__init__(dir_model, *args, hparams=hparams, **kwargs)
  4938. self.d_model = self.find_hparam(["hidden_size", "d_model", "dim"])
  4939. self.d_inner = self.find_hparam(["mamba_d_ssm", "intermediate_size", "d_inner"], optional=True) or 2 * self.d_model
  4940. self.n_group = self.find_hparam(["n_groups"], optional=True) or 1
  4941. def set_vocab(self):
  4942. vocab_size = self.hparams["vocab_size"]
4943. # Round the vocab size up to the next multiple of pad_vocab_size_multiple (default 16)
  4944. pad_vocab = self.hparams.get("pad_vocab_size_multiple", 16)
  4945. # pad using ceiling division
  4946. # ref: https://stackoverflow.com/a/17511341/22827863
  4947. vocab_size = -(vocab_size // -pad_vocab) * pad_vocab
  4948. self.hparams["vocab_size"] = vocab_size
  4949. if (self.dir_model / "tokenizer.model").is_file():
  4950. self._set_vocab_sentencepiece()
  4951. elif (self.dir_model / "tokenizer.model.v3").is_file():
  4952. # mamba-codestral
  4953. raise NotImplementedError(f"Please rename {self.dir_model / 'tokenizer.model.v3'} to {self.dir_model / 'tokenizer.model'}")
  4954. elif (self.dir_model / "tokenizer.json").is_file():
  4955. self._set_vocab_gpt2()
  4956. else:
  4957. # Use the GPT-NeoX tokenizer when no tokenizer files are present
  4958. self._set_vocab_builtin("gpt-neox", vocab_size)
  4959. def set_gguf_parameters(self):
  4960. d_conv = self.find_hparam(["conv_kernel", "d_conv"], optional=True) or 4
  4961. d_state = self.find_hparam(["state_size", "d_state"], optional=True) or 128
  4962. head_dim = self.find_hparam(["mamba_d_head", "head_dim"], optional=True) or 64
  4963. rms_norm_eps = self.find_hparam(["layer_norm_epsilon", "rms_norm_eps"], optional=True) or 1e-5
  4964. # Fail early for models which don't have a block expansion factor of 2
  4965. # TODO: does this really matter?
  4966. # skip the assertion for FalconH1 Model
  4967. if self.model_arch != gguf.MODEL_ARCH.FALCON_H1:
  4968. assert self.d_inner == 2 * self.d_model
  4969. assert self.d_inner % head_dim == 0
  4970. self.gguf_writer.add_context_length(2**20) # arbitrary value; for those who use the default
  4971. self.gguf_writer.add_embedding_length(self.d_model)
  4972. self.gguf_writer.add_feed_forward_length(0) # unused, but seemingly required when loading
  4973. self.gguf_writer.add_head_count(0) # unused, but seemingly required when loading
  4974. self.gguf_writer.add_block_count(self.block_count)
  4975. self.gguf_writer.add_ssm_conv_kernel(d_conv)
  4976. self.gguf_writer.add_ssm_inner_size(self.d_inner)
  4977. self.gguf_writer.add_ssm_state_size(d_state)
  4978. self.gguf_writer.add_ssm_time_step_rank(self.d_inner // head_dim)
  4979. self.gguf_writer.add_ssm_group_count(self.n_group)
  4980. self.gguf_writer.add_layer_norm_rms_eps(rms_norm_eps)
  4981. self.gguf_writer.add_file_type(self.ftype)
  4982. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  4983. if name.startswith("model.backbone") or name.startswith("model.lm_head"):
  4984. # map Mamba-Codestral-7B-v0.1 tensor names to the names used by Mamba-2
  4985. name = name.removeprefix("model.")
  4986. if name.endswith(".dt_bias"):
  4987. name = name.rpartition(".dt_bias")[0] + ".dt_proj.bias"
  4988. new_name = self.map_tensor_name(name)
  4989. if self.match_model_tensor_name(new_name, gguf.MODEL_TENSOR.SSM_CONV1D, bid):
  4990. data_torch = data_torch.squeeze()
  4991. elif any(self.match_model_tensor_name(new_name, t, bid, suffix="") for t in [
  4992. gguf.MODEL_TENSOR.SSM_A,
  4993. gguf.MODEL_TENSOR.SSM_D,
  4994. ]):
  4995. # unsqueeze A to use similar shape semantics as Mamba-1
  4996. # (D is also unsqueezed, but for more straightforward broadcast internally)
  4997. data_torch = data_torch.reshape((*data_torch.shape, 1))
  4998. elif self.match_model_tensor_name(new_name, gguf.MODEL_TENSOR.SSM_NORM, bid):
  4999. data_torch = data_torch.reshape((self.n_group, self.d_inner // self.n_group))
  5000. if name.endswith(".A_log"):
  5001. logger.debug("A_log --> A ==> " + new_name)
  5002. data_torch = -torch.exp(data_torch)
  5003. yield (new_name, data_torch)
  5004. @ModelBase.register("JambaForCausalLM")
  5005. class JambaModel(TextModel):
  5006. model_arch = gguf.MODEL_ARCH.JAMBA
  5007. def set_vocab(self):
  5008. if (self.dir_model / "tokenizer.model").is_file():
  5009. self._set_vocab_sentencepiece()
  5010. else:
  5011. self._set_vocab_llama_hf()
  5012. self.gguf_writer.add_add_space_prefix(False)
  5013. def set_gguf_parameters(self):
  5014. d_model = self.find_hparam(["hidden_size", "mamba_d_model"])
  5015. d_conv = self.find_hparam(["mamba_d_conv"], optional=True) or 4
  5016. d_inner = self.hparams["mamba_expand"] * d_model
  5017. d_state = self.find_hparam(["mamba_d_state"], optional=True) or 16
  5018. # ceiling division
  5019. # ref: https://stackoverflow.com/a/17511341/22827863
  5020. # ref: https://github.com/state-spaces/mamba/blob/ce59daea3a090d011d6476c6e5b97f6d58ddad8b/mamba_ssm/modules/mamba_simple.py#L58
  5021. dt_rank = self.find_hparam(["mamba_dt_rank"], optional=True) or -(d_model // -16)
  5022. rms_norm_eps = self.find_hparam(["layer_norm_epsilon", "rms_norm_eps"], optional=True) or 1e-6
  5023. n_kv_head = self.hparams["num_key_value_heads"]
  5024. attn_offset = self.hparams["attn_layer_offset"]
  5025. attn_period = self.hparams["attn_layer_period"]
  5026. n_kv_vec = [0 for _ in range(attn_offset)] + [
  5027. n_kv_head if (i - attn_offset) % attn_period == 0 else 0 for i in range(attn_offset, self.block_count)
  5028. ]
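# e.g. with attn_offset=4, attn_period=8 and 32 blocks (illustrative values), layers
# 4, 12, 20 and 28 are attention layers (n_kv_head KV heads); all other layers are Mamba (0)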
  5029. self.gguf_writer.add_block_count(self.block_count)
  5030. self.gguf_writer.add_context_length(self.find_hparam(["max_position_embeddings", "n_ctx"]))
  5031. self.gguf_writer.add_embedding_length(d_model)
  5032. self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
  5033. self.gguf_writer.add_head_count(self.hparams["num_attention_heads"])
  5034. self.gguf_writer.add_head_count_kv(n_kv_vec)
  5035. self.gguf_writer.add_ssm_conv_kernel(d_conv)
  5036. self.gguf_writer.add_ssm_inner_size(d_inner)
  5037. self.gguf_writer.add_ssm_state_size(d_state)
  5038. self.gguf_writer.add_ssm_time_step_rank(dt_rank)
  5039. self.gguf_writer.add_layer_norm_rms_eps(rms_norm_eps)
  5040. self.gguf_writer.add_expert_count(self.hparams["num_experts"])
  5041. self.gguf_writer.add_expert_used_count(self.hparams["num_experts_per_tok"])
  5042. self.gguf_writer.add_file_type(self.ftype)
  5043. _experts: list[dict[str, Tensor]] | None = None
  5044. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  5045. # Mini-Jamba
  5046. name = name.replace(".moe.", ".feed_forward.")
  5047. if bid is not None:
  5048. moe_offset = self.hparams["expert_layer_offset"]
  5049. moe_period = self.hparams["expert_layer_period"]
  5050. if not (bid >= moe_offset and (bid - moe_offset) % moe_period == 0):
  5051. name = name.replace(".experts.0.", ".")
  5052. # process the experts separately
  5053. if ".feed_forward.experts." in name:
  5054. n_experts = self.hparams["num_experts"]
  5055. assert bid is not None
  5056. if self._experts is None:
  5057. self._experts = [{} for _ in range(self.block_count)]
  5058. self._experts[bid][name] = data_torch
  5059. if len(self._experts[bid]) >= n_experts * 3:
  5060. # merge the experts into a single 3d tensor
  5061. for wid in ["down_proj", "gate_proj", "up_proj"]:
  5062. datas: list[Tensor] = []
  5063. for xid in range(n_experts):
  5064. ename = f"model.layers.{bid}.feed_forward.experts.{xid}.{wid}.weight"
  5065. datas.append(self._experts[bid][ename])
  5066. del self._experts[bid][ename]
  5067. data_torch = torch.stack(datas, dim=0)
  5068. # using the same merged name as qwen2moe
  5069. merged_name = f"model.layers.{bid}.mlp.experts.{wid}.weight"
  5070. new_name = self.map_tensor_name(merged_name)
  5071. yield new_name, data_torch
  5072. return
  5073. new_name = self.map_tensor_name(name)
  5074. if self.match_model_tensor_name(new_name, gguf.MODEL_TENSOR.SSM_CONV1D, bid):
  5075. data_torch = data_torch.squeeze()
  5076. if name.endswith(".A_log"):
  5077. logger.debug("A_log --> A ==> " + new_name)
  5078. data_torch = -torch.exp(data_torch)
  5079. yield (new_name, data_torch)
  5080. def prepare_tensors(self):
  5081. super().prepare_tensors()
  5082. if self._experts is not None:
  5083. # flatten `list[dict[str, Tensor]]` into `list[str]`
  5084. experts = [k for d in self._experts for k in d.keys()]
  5085. if len(experts) > 0:
  5086. raise ValueError(f"Unprocessed experts: {experts}")
  5087. @ModelBase.register("CohereForCausalLM")
  5088. class CommandR2Model(TextModel):
  5089. model_arch = gguf.MODEL_ARCH.COMMAND_R
  5090. def __init__(self, *args, **kwargs):
  5091. super().__init__(*args, **kwargs)
  5092. # max_position_embeddings = 8192 in config.json but model was actually
  5093. # trained on 128k context length
  5094. # aya-23 models don't have model_max_length specified
  5095. self.hparams["max_position_embeddings"] = self.find_hparam(["model_max_length", "max_position_embeddings"])
  5096. def set_gguf_parameters(self):
  5097. super().set_gguf_parameters()
  5098. self.gguf_writer.add_logit_scale(self.hparams["logit_scale"])
  5099. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)
  5100. @ModelBase.register("Cohere2ForCausalLM")
  5101. class Cohere2Model(TextModel):
  5102. model_arch = gguf.MODEL_ARCH.COHERE2
  5103. def set_gguf_parameters(self):
  5104. super().set_gguf_parameters()
  5105. self.gguf_writer.add_logit_scale(self.hparams["logit_scale"])
  5106. self.gguf_writer.add_sliding_window(self.hparams["sliding_window"])
  5107. self.gguf_writer.add_vocab_size(self.hparams["vocab_size"])
  5108. rotary_pct = self.hparams["rotary_pct"]
  5109. hidden_size = self.hparams["hidden_size"]
  5110. num_attention_heads = self.hparams["num_attention_heads"]
  5111. self.gguf_writer.add_rope_dimension_count(int(rotary_pct * (hidden_size // num_attention_heads)))
  5112. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)
  5113. @ModelBase.register("OlmoForCausalLM")
  5114. @ModelBase.register("OLMoForCausalLM")
  5115. class OlmoModel(TextModel):
  5116. model_arch = gguf.MODEL_ARCH.OLMO
  5117. def set_gguf_parameters(self):
  5118. super().set_gguf_parameters()
  5119. self.gguf_writer.add_layer_norm_eps(1e-5)
  5120. clip_qkv = self.hparams.get("clip_qkv")
  5121. if clip_qkv is not None:
  5122. self.gguf_writer.add_clamp_kqv(clip_qkv)
5123. # Same as the superclass, but permutes q_proj and k_proj
  5124. # Copied from: LlamaModel
  5125. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  5126. del bid # unused
  5127. n_head = self.hparams["num_attention_heads"]
  5128. n_kv_head = self.hparams.get("num_key_value_heads")
  5129. if name.endswith("q_proj.weight"):
  5130. data_torch = LlamaModel.permute(data_torch, n_head, n_head)
  5131. if name.endswith("k_proj.weight"):
  5132. data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)
  5133. return [(self.map_tensor_name(name), data_torch)]
  5134. @ModelBase.register("SeedOssForCausalLM")
  5135. class SeedOssModel(TextModel):
  5136. model_arch = gguf.MODEL_ARCH.SEED_OSS
  5137. @ModelBase.register("Olmo2ForCausalLM")
  5138. @ModelBase.register("Olmo3ForCausalLM")
  5139. class Olmo2Model(TextModel):
  5140. model_arch = gguf.MODEL_ARCH.OLMO2
  5141. def set_gguf_parameters(self):
  5142. super().set_gguf_parameters()
  5143. rope_scaling = self.hparams.get("rope_scaling") or {}
  5144. if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling:
  5145. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
  5146. self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
  5147. self.gguf_writer.add_rope_scaling_attn_factors(rope_scaling["attention_factor"])
  5148. self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"])
  5149. if "sliding_window" in self.hparams:
  5150. self.gguf_writer.add_sliding_window(self.hparams["sliding_window"])
  5151. sliding_window_pattern = []
  5152. if "layer_types" in self.hparams:
  5153. sliding_window_pattern = [t == "sliding_attention" for t in self.hparams["layer_types"]]
  5154. else:
  5155. # Olmo2 does not use sliding window attention.
  5156. # Olmo3 defaults to using sliding window for all layers except every 4th.
  5157. for i in range(self.hparams["num_hidden_layers"]):
  5158. sliding_window_pattern.append((i + 1) % 4 != 0)
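# i.e. every 4th layer (0-indexed 3, 7, 11, ...) uses full attention,
# all other layers use the sliding window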
  5159. self.gguf_writer.add_sliding_window_pattern(sliding_window_pattern)
  5160. @ModelBase.register("OlmoeForCausalLM")
  5161. class OlmoeModel(TextModel):
  5162. model_arch = gguf.MODEL_ARCH.OLMOE
  5163. def set_gguf_parameters(self):
  5164. super().set_gguf_parameters()
  5165. self.gguf_writer.add_layer_norm_rms_eps(1e-5)
  5166. if (n_experts := self.hparams.get("num_experts")) is not None:
  5167. self.gguf_writer.add_expert_count(n_experts)
  5168. _experts: list[dict[str, Tensor]] | None = None
  5169. # Copied from: Qwen2MoeModel
  5170. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  5171. # process the experts separately
  5172. if name.find("experts") != -1:
  5173. n_experts = self.hparams["num_experts"]
  5174. assert bid is not None
  5175. if self._experts is None:
  5176. self._experts = [{} for _ in range(self.block_count)]
  5177. self._experts[bid][name] = data_torch
  5178. if len(self._experts[bid]) >= n_experts * 3:
  5179. tensors: list[tuple[str, Tensor]] = []
  5180. # merge the experts into a single 3d tensor
  5181. for w_name in ["down_proj", "gate_proj", "up_proj"]:
  5182. datas: list[Tensor] = []
  5183. for xid in range(n_experts):
  5184. ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
  5185. datas.append(self._experts[bid][ename])
  5186. del self._experts[bid][ename]
  5187. data_torch = torch.stack(datas, dim=0)
  5188. merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"
  5189. new_name = self.map_tensor_name(merged_name)
  5190. tensors.append((new_name, data_torch))
  5191. return tensors
  5192. else:
  5193. return []
  5194. return [(self.map_tensor_name(name), data_torch)]
  5195. # Copied from: Qwen2MoeModel
  5196. def prepare_tensors(self):
  5197. super().prepare_tensors()
  5198. if self._experts is not None:
  5199. # flatten `list[dict[str, Tensor]]` into `list[str]`
  5200. experts = [k for d in self._experts for k in d.keys()]
  5201. if len(experts) > 0:
  5202. raise ValueError(f"Unprocessed experts: {experts}")
  5203. @ModelBase.register("JinaBertModel", "JinaBertForMaskedLM")
  5204. class JinaBertV2Model(BertModel):
  5205. model_arch = gguf.MODEL_ARCH.JINA_BERT_V2
  5206. def set_vocab(self):
  5207. tokenizer_class = 'BertTokenizer'
  5208. with open(self.dir_model / "tokenizer_config.json", "r", encoding="utf-8") as f:
  5209. tokenizer_class = json.load(f)['tokenizer_class']
  5210. if tokenizer_class == 'BertTokenizer':
  5211. super().set_vocab()
  5212. elif tokenizer_class == 'RobertaTokenizer':
  5213. self._set_vocab_gpt2()
  5214. self.gguf_writer.add_token_type_count(2)
  5215. else:
  5216. raise NotImplementedError(f'Tokenizer {tokenizer_class} is not supported for JinaBertModel')
  5217. @ModelBase.register("OpenELMForCausalLM")
  5218. class OpenELMModel(TextModel):
  5219. model_arch = gguf.MODEL_ARCH.OPENELM
  5220. @staticmethod
  5221. def _make_divisible(v: float | int, divisor: int) -> int:
  5222. # ref: https://huggingface.co/apple/OpenELM-270M-Instruct/blob/eb111ff2e6724348e5b905984063d4064d4bc579/configuration_openelm.py#L34-L38
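# illustrative example: _make_divisible(640, 256) -> int(640 + 128) // 256 * 256 = 768,
# and since 768 >= 0.9 * 640, no round-down correction is applied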
  5223. new_v = max(divisor, int(v + divisor / 2) // divisor * divisor)
  5224. # Make sure that round down does not go down by more than 10%.
  5225. if new_v < 0.9 * v:
  5226. new_v += divisor
  5227. return new_v
  5228. def __init__(self, *args, **kwargs):
  5229. super().__init__(*args, **kwargs)
  5230. ffn_multipliers: list[float] = self.hparams["ffn_multipliers"]
  5231. ffn_dim_divisor: int = self.hparams["ffn_dim_divisor"]
  5232. self._n_embd: int = self.hparams["model_dim"]
  5233. self._num_kv_heads: list[int] = self.hparams["num_kv_heads"]
  5234. self._num_query_heads: list[int] = self.hparams["num_query_heads"]
  5235. self._ffn_dims: list[int] = [
  5236. OpenELMModel._make_divisible(multiplier * self._n_embd, ffn_dim_divisor)
  5237. for multiplier in ffn_multipliers
  5238. ]
  5239. assert isinstance(self._num_kv_heads, list) and isinstance(self._num_kv_heads[0], int)
  5240. assert isinstance(self._num_query_heads, list) and isinstance(self._num_query_heads[0], int)
  5241. # Uses the tokenizer from meta-llama/Llama-2-7b-hf
  5242. def set_vocab(self):
  5243. try:
  5244. self._set_vocab_sentencepiece()
  5245. except FileNotFoundError:
  5246. self._set_vocab_builtin("llama-spm", self.hparams["vocab_size"])
  5247. def set_gguf_parameters(self):
  5248. n_embd = self._n_embd
  5249. head_dim = self.hparams["head_dim"]
  5250. rot_pct = 1.0
  5251. assert self.block_count == len(self._num_kv_heads)
  5252. assert self.block_count == len(self._num_query_heads)
  5253. assert self.block_count == len(self._ffn_dims)
  5254. self.gguf_writer.add_block_count(self.block_count)
  5255. self.gguf_writer.add_context_length(self.hparams["max_context_length"])
  5256. self.gguf_writer.add_embedding_length(n_embd)
  5257. self.gguf_writer.add_feed_forward_length(self._ffn_dims)
  5258. self.gguf_writer.add_head_count(self._num_query_heads)
  5259. self.gguf_writer.add_head_count_kv(self._num_kv_heads)
  5260. self.gguf_writer.add_rope_freq_base(self.hparams["rope_freq_constant"])
  5261. # https://huggingface.co/apple/OpenELM-270M-Instruct/blob/c401df2/modeling_openelm.py#L30
  5262. self.gguf_writer.add_layer_norm_rms_eps(1e-6)
  5263. self.gguf_writer.add_rope_dimension_count(int(rot_pct * head_dim))
  5264. self.gguf_writer.add_key_length(head_dim)
  5265. self.gguf_writer.add_value_length(head_dim)
  5266. self.gguf_writer.add_file_type(self.ftype)
  5267. def find_hparam(self, keys: Iterable[str], optional: bool = False) -> Any:
  5268. if "n_layers" in keys:
  5269. return self.hparams["num_transformer_layers"]
  5270. return super().find_hparam(keys, optional)
  5271. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  5272. # split ff
  5273. if bid is not None and name == f"transformer.layers.{bid}.ffn.proj_1.weight":
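# proj_1 packs the gate and up projections row-wise: the first ff_dim rows
# are the gate projection, the remaining rows are the up projection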
  5274. ff_dim = self._ffn_dims[bid]
  5275. yield (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE, bid), data_torch[:ff_dim])
  5276. yield (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP, bid), data_torch[ff_dim:])
  5277. return
  5278. yield (self.map_tensor_name(name), data_torch)
  5279. @ModelBase.register("ArcticForCausalLM")
  5280. class ArcticModel(TextModel):
  5281. model_arch = gguf.MODEL_ARCH.ARCTIC
  5282. def set_vocab(self):
  5283. # The reason for using a custom implementation here is that the
  5284. # snowflake-arctic-instruct model redefined tokens 31998 and 31999 from
  5285. # tokenizer.model and used them as BOS and EOS instead of adding new tokens.
  5286. from sentencepiece import SentencePieceProcessor
  5287. tokenizer_path = self.dir_model / 'tokenizer.model'
  5288. if not tokenizer_path.is_file():
  5289. logger.error(f'Error: Missing {tokenizer_path}')
  5290. sys.exit(1)
  5291. # Read the whole vocabulary from the tokenizer.model file
  5292. tokenizer = SentencePieceProcessor()
  5293. tokenizer.LoadFromFile(str(tokenizer_path))
  5294. vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())
  5295. tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
  5296. scores: list[float] = [-10000.0] * vocab_size
  5297. toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size
  5298. for token_id in range(tokenizer.vocab_size()):
  5299. piece = tokenizer.IdToPiece(token_id)
  5300. text = piece.encode("utf-8")
  5301. score = tokenizer.GetScore(token_id)
  5302. toktype = SentencePieceTokenTypes.NORMAL
  5303. if tokenizer.IsUnknown(token_id):
  5304. toktype = SentencePieceTokenTypes.UNKNOWN
  5305. elif tokenizer.IsControl(token_id):
  5306. toktype = SentencePieceTokenTypes.CONTROL
  5307. elif tokenizer.IsUnused(token_id):
  5308. toktype = SentencePieceTokenTypes.UNUSED
  5309. elif tokenizer.IsByte(token_id):
  5310. toktype = SentencePieceTokenTypes.BYTE
  5311. tokens[token_id] = text
  5312. scores[token_id] = score
  5313. toktypes[token_id] = toktype
5314. # Use the added_tokens_decoder field from tokenizer_config.json as the source
5315. # of information about added/redefined tokens and modify them accordingly.
  5316. tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
  5317. if tokenizer_config_file.is_file():
  5318. with open(tokenizer_config_file, "r", encoding="utf-8") as f:
  5319. tokenizer_config_json = json.load(f)
  5320. if "added_tokens_decoder" in tokenizer_config_json:
  5321. added_tokens_decoder = tokenizer_config_json["added_tokens_decoder"]
  5322. for token_id, token_json in added_tokens_decoder.items():
  5323. token_id = int(token_id)
  5324. if token_id >= vocab_size:
  5325. logger.debug(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
  5326. continue
  5327. token_content = token_json["content"]
  5328. token_type = SentencePieceTokenTypes.USER_DEFINED
  5329. token_score = -10000.0
  5330. # Map unk_token to UNKNOWN, other special tokens to CONTROL
  5331. # Set the score to 0.0 as in the original tokenizer.model
  5332. if ("special" in token_json) and token_json["special"]:
  5333. if token_content == tokenizer_config_json["unk_token"]:
  5334. token_type = SentencePieceTokenTypes.UNKNOWN
  5335. else:
  5336. token_type = SentencePieceTokenTypes.CONTROL
  5337. token_score = 0.0
  5338. logger.info(f"Setting added token {token_id} to '{token_content}' (type: {token_type}, score: {token_score:.2f})")
  5339. tokens[token_id] = token_content.encode("utf-8")
  5340. toktypes[token_id] = token_type
  5341. scores[token_id] = token_score
  5342. self.gguf_writer.add_tokenizer_model("llama")
  5343. self.gguf_writer.add_tokenizer_pre("default")
  5344. self.gguf_writer.add_token_list(tokens)
  5345. self.gguf_writer.add_token_scores(scores)
  5346. self.gguf_writer.add_token_types(toktypes)
  5347. special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
  5348. special_vocab.add_to_gguf(self.gguf_writer)
  5349. def set_gguf_parameters(self):
  5350. super().set_gguf_parameters()
  5351. hparams = self.hparams
  5352. self.gguf_writer.add_vocab_size(hparams["vocab_size"])
  5353. self.gguf_writer.add_rope_dimension_count(hparams["hidden_size"] // hparams["num_attention_heads"])
  5354. _experts: list[dict[str, Tensor]] | None = None
  5355. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  5356. n_head = self.hparams["num_attention_heads"]
  5357. n_kv_head = self.hparams.get("num_key_value_heads")
  5358. if name.endswith("q_proj.weight"):
  5359. data_torch = LlamaModel.permute(data_torch, n_head, n_head)
  5360. if name.endswith("k_proj.weight"):
  5361. data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)
  5362. # process the experts separately
  5363. if name.find("block_sparse_moe.experts") != -1:
  5364. n_experts = self.hparams["num_local_experts"]
  5365. assert bid is not None
  5366. if self._experts is None:
  5367. self._experts = [{} for _ in range(self.block_count)]
  5368. self._experts[bid][name] = data_torch
  5369. if len(self._experts[bid]) >= n_experts * 3:
  5370. tensors: list[tuple[str, Tensor]] = []
  5371. # merge the experts into a single 3d tensor
  5372. for wid in ["w1", "w2", "w3"]:
  5373. datas: list[Tensor] = []
  5374. for xid in range(n_experts):
  5375. ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.{wid}.weight"
  5376. datas.append(self._experts[bid][ename])
  5377. del self._experts[bid][ename]
  5378. data_torch = torch.stack(datas, dim=0)
  5379. merged_name = f"layers.{bid}.feed_forward.experts.{wid}.weight"
  5380. new_name = self.map_tensor_name(merged_name)
  5381. tensors.append((new_name, data_torch))
  5382. return tensors
  5383. else:
  5384. return []
  5385. return [(self.map_tensor_name(name), data_torch)]
  5386. def prepare_tensors(self):
  5387. super().prepare_tensors()
  5388. if self._experts is not None:
  5389. # flatten `list[dict[str, Tensor]]` into `list[str]`
  5390. experts = [k for d in self._experts for k in d.keys()]
  5391. if len(experts) > 0:
  5392. raise ValueError(f"Unprocessed experts: {experts}")
  5393. @ModelBase.register("DeepseekForCausalLM")
  5394. class DeepseekModel(TextModel):
  5395. model_arch = gguf.MODEL_ARCH.DEEPSEEK
  5396. def set_vocab(self):
  5397. try:
  5398. self._set_vocab_sentencepiece()
  5399. except FileNotFoundError:
  5400. self._set_vocab_gpt2()
  5401. def set_gguf_parameters(self):
  5402. super().set_gguf_parameters()
  5403. hparams = self.hparams
  5404. if (rope_dim := hparams.get("head_dim")) is None:
  5405. rope_dim = hparams["hidden_size"] // hparams["num_attention_heads"]
  5406. self.gguf_writer.add_rope_dimension_count(rope_dim)
  5407. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)
  5408. self.gguf_writer.add_leading_dense_block_count(hparams["first_k_dense_replace"])
  5409. self.gguf_writer.add_vocab_size(hparams["vocab_size"])
  5410. self.gguf_writer.add_expert_feed_forward_length(hparams["moe_intermediate_size"])
  5411. self.gguf_writer.add_expert_weights_scale(1.0)
  5412. self.gguf_writer.add_expert_count(hparams["n_routed_experts"])
  5413. self.gguf_writer.add_expert_shared_count(hparams["n_shared_experts"])
  5414. _experts: list[dict[str, Tensor]] | None = None
  5415. @staticmethod
  5416. def permute(weights: Tensor, n_head: int, n_head_kv: int | None):
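# rearranges the rows of the Q/K projections from the HF half-rotation layout
# to the interleaved rotary-pair layout expected on the llama.cpp side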
  5417. if n_head_kv is not None and n_head != n_head_kv:
  5418. n_head = n_head_kv
  5419. return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
  5420. .swapaxes(1, 2)
  5421. .reshape(weights.shape))
  5422. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  5423. n_head = self.hparams["num_attention_heads"]
  5424. n_kv_head = self.hparams.get("num_key_value_heads")
  5425. if name.endswith(("q_proj.weight", "q_proj.bias")):
  5426. data_torch = DeepseekModel.permute(data_torch, n_head, n_head)
  5427. if name.endswith(("k_proj.weight", "k_proj.bias")):
  5428. data_torch = DeepseekModel.permute(data_torch, n_head, n_kv_head)
  5429. # process the experts separately
  5430. if name.find("mlp.experts") != -1:
  5431. n_experts = self.hparams["n_routed_experts"]
  5432. assert bid is not None
  5433. if self._experts is None:
  5434. self._experts = [{} for _ in range(self.block_count)]
  5435. self._experts[bid][name] = data_torch
  5436. if len(self._experts[bid]) >= n_experts * 3:
  5437. tensors: list[tuple[str, Tensor]] = []
  5438. # merge the experts into a single 3d tensor
  5439. for w_name in ["down_proj", "gate_proj", "up_proj"]:
  5440. datas: list[Tensor] = []
  5441. for xid in range(n_experts):
  5442. ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
  5443. datas.append(self._experts[bid][ename])
  5444. del self._experts[bid][ename]
  5445. data_torch = torch.stack(datas, dim=0)
  5446. merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"
  5447. new_name = self.map_tensor_name(merged_name)
  5448. tensors.append((new_name, data_torch))
  5449. return tensors
  5450. else:
  5451. return []
  5452. return [(self.map_tensor_name(name), data_torch)]
  5453. def prepare_tensors(self):
  5454. super().prepare_tensors()
  5455. if self._experts is not None:
  5456. # flatten `list[dict[str, Tensor]]` into `list[str]`
  5457. experts = [k for d in self._experts for k in d.keys()]
  5458. if len(experts) > 0:
  5459. raise ValueError(f"Unprocessed experts: {experts}")
  5460. @ModelBase.register(
  5461. "DeepseekV2ForCausalLM",
  5462. "DeepseekV3ForCausalLM",
  5463. "KimiVLForConditionalGeneration",
  5464. )
  5465. class DeepseekV2Model(TextModel):
  5466. model_arch = gguf.MODEL_ARCH.DEEPSEEK2
  5467. def set_vocab(self):
  5468. try:
  5469. self._set_vocab_gpt2()
  5470. return
  5471. except Exception:
  5472. pass
  5473. from transformers import AutoTokenizer
  5474. tokenizer = AutoTokenizer.from_pretrained(self.dir_model, trust_remote_code=True)
  5475. tokpre = self.get_vocab_base_pre(tokenizer)
  5476. if tokpre == "kimi-k2":
5477. # Build the merges list using an approach similar to HunYuanMoE
  5478. merges = []
  5479. vocab = {}
  5480. mergeable_ranks = tokenizer.model._mergeable_ranks
  5481. for token, rank in mergeable_ranks.items():
  5482. vocab[QwenModel.token_bytes_to_string(token)] = rank
  5483. if len(token) == 1:
  5484. continue
  5485. merged = QwenModel.bpe(mergeable_ranks, token, max_rank=rank)
  5486. if len(merged) == 2:
  5487. merges.append(' '.join(map(QwenModel.token_bytes_to_string, merged)))
  5488. # Build token list
  5489. vocab_size = self.hparams["vocab_size"]
  5490. special_tokens = tokenizer.special_tokens
  5491. reverse_vocab = {id_ : encoded_tok for encoded_tok, id_ in {**vocab, **special_tokens}.items()}
  5492. tokens: list[str] = []
  5493. toktypes: list[int] = []
  5494. for i in range(vocab_size):
  5495. if i not in reverse_vocab:
  5496. tokens.append(f"[PAD{i}]")
  5497. toktypes.append(gguf.TokenType.UNUSED)
  5498. else:
  5499. token = reverse_vocab[i]
  5500. tokens.append(token)
  5501. if i in special_tokens.values():
  5502. toktypes.append(gguf.TokenType.CONTROL)
  5503. else:
  5504. toktypes.append(gguf.TokenType.NORMAL)
  5505. self.gguf_writer.add_tokenizer_model("gpt2")
  5506. self.gguf_writer.add_tokenizer_pre(tokpre)
  5507. self.gguf_writer.add_token_list(tokens)
  5508. self.gguf_writer.add_token_types(toktypes)
  5509. self.gguf_writer.add_token_merges(merges)
  5510. special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=False)
  5511. special_vocab.add_to_gguf(self.gguf_writer)
  5512. else:
  5513. raise NotImplementedError(f"Deepseek pre-tokenizer {tokpre!r} is not supported yet!")
  5514. def set_gguf_parameters(self):
5515. # note: deepseek2 using MLA converts into MQA (i.e. GQA with 1 group)
  5516. self.hparams["num_key_value_heads"] = 1
  5517. super().set_gguf_parameters()
  5518. hparams = self.hparams
  5519. self.gguf_writer.add_leading_dense_block_count(hparams["first_k_dense_replace"])
  5520. self.gguf_writer.add_vocab_size(hparams["vocab_size"])
  5521. if "q_lora_rank" in hparams and hparams["q_lora_rank"] is not None:
  5522. self.gguf_writer.add_q_lora_rank(hparams["q_lora_rank"])
  5523. self.gguf_writer.add_kv_lora_rank(hparams["kv_lora_rank"])
  5524. # note: deepseek2 using MLA converts into MQA with larger heads, then decompresses to MHA
  5525. self.gguf_writer.add_key_length(hparams["kv_lora_rank"] + hparams["qk_rope_head_dim"])
  5526. self.gguf_writer.add_value_length(hparams["kv_lora_rank"])
  5527. self.gguf_writer.add_key_length_mla(hparams["qk_nope_head_dim"] + hparams["qk_rope_head_dim"])
  5528. self.gguf_writer.add_value_length_mla(hparams["v_head_dim"])
  5529. self.gguf_writer.add_expert_feed_forward_length(hparams["moe_intermediate_size"])
  5530. self.gguf_writer.add_expert_count(hparams["n_routed_experts"])
  5531. self.gguf_writer.add_expert_shared_count(hparams["n_shared_experts"])
  5532. self.gguf_writer.add_expert_weights_scale(hparams["routed_scaling_factor"])
  5533. self.gguf_writer.add_expert_weights_norm(hparams["norm_topk_prob"])
  5534. if hparams["scoring_func"] == "sigmoid":
  5535. self.gguf_writer.add_expert_gating_func(gguf.ExpertGatingFuncType.SIGMOID)
  5536. elif hparams["scoring_func"] == "softmax":
  5537. self.gguf_writer.add_expert_gating_func(gguf.ExpertGatingFuncType.SOFTMAX)
  5538. else:
  5539. raise ValueError(f"Unsupported scoring_func value: {hparams['scoring_func']}")
  5540. self.gguf_writer.add_rope_dimension_count(hparams["qk_rope_head_dim"])
  5541. rope_scaling = self.hparams.get("rope_scaling") or {}
  5542. if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling:
  5543. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
  5544. self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
  5545. self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"])
  5546. self.gguf_writer.add_rope_scaling_yarn_log_mul(0.1 * rope_scaling["mscale_all_dim"])

    _experts: list[dict[str, Tensor]] | None = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # skip vision tensors and remove "language_model." for Kimi-VL
        if "vision_tower" in name or "multi_modal_projector" in name:
            return []

        if name.startswith("language_model."):
            name = name.replace("language_model.", "")

        # rename e_score_correction_bias tensors
        if name.endswith("e_score_correction_bias"):
            name = name.replace("e_score_correction_bias", "e_score_correction.bias")

        # skip Multi-Token Prediction (MTP) layers
        block_count = self.hparams["num_hidden_layers"]
        match = re.match(r"model\.layers\.(\d+)", name)
        if match and int(match.group(1)) >= block_count:
            return []

        # process the experts separately
        if name.find("mlp.experts") != -1:
            n_experts = self.hparams["n_routed_experts"]
            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for w_name in ["down_proj", "gate_proj", "up_proj"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)
                    merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"
                    new_name = self.map_tensor_name(merged_name)
                    tensors.append((new_name, data_torch))
                return tensors
            else:
                return []

        # note: MLA with the absorption optimization needs kv_b_proj split into
        # k_b_proj and v_b_proj, with k_b_proj transposed
        if name.endswith("kv_b_proj.weight"):
            name_kb = name.replace("kv_b_proj", "k_b_proj")
            name_vb = name.replace("kv_b_proj", "v_b_proj")

            n_head_kv = self.hparams["num_key_value_heads"]
            v_head_dim = self.hparams["v_head_dim"]
            qk_nope_head_dim = self.hparams["qk_nope_head_dim"]

            assert data_torch.shape[0] == n_head_kv * (v_head_dim + qk_nope_head_dim)

            kv_b = data_torch.view(n_head_kv, v_head_dim + qk_nope_head_dim, data_torch.shape[-1])
            k_b, v_b = torch.split(kv_b, [qk_nope_head_dim, v_head_dim], dim=1)
            k_b = k_b.transpose(1, 2)

            return [
                (self.map_tensor_name(name_kb), k_b),
                (self.map_tensor_name(name_vb), v_b)
            ]

        return [(self.map_tensor_name(name), data_torch)]
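
    # Shape walk-through (added for clarity; the concrete numbers are an
    # assumption based on DeepSeek-V2-style configs, and assume that
    # num_key_value_heads still holds the original config value at this point):
    # kv_b_proj.weight is [n_head_kv * (qk_nope_head_dim + v_head_dim), kv_lora_rank].
    # With n_head_kv=128, qk_nope_head_dim=128, v_head_dim=128, kv_lora_rank=512:
    #   view      -> [128, 256, 512]
    #   split     -> k_b [128, 128, 512] and v_b [128, 128, 512]
    #   transpose -> k_b [128, 512, 128], so k_b can be applied to the
    # compressed latent directly under the MLA "absorption" optimization.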

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")
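
    # Illustrative note (added, not in the original): torch.stack(datas, dim=0)
    # in modify_tensors above turns n_experts separate [n_ff, n_embd] matrices
    # into a single [n_experts, n_ff, n_embd] tensor per projection, which is
    # the 3D layout used for merged expert weights in the output GGUF.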
  5608. @ModelBase.register("Dots1ForCausalLM")
  5609. class Dots1Model(Qwen2MoeModel):
  5610. model_arch = gguf.MODEL_ARCH.DOTS1
  5611. def __init__(self, *args, **kwargs):
  5612. super().__init__(*args, **kwargs)
  5613. self.hparams["num_experts"] = self.hparams["n_routed_experts"]
  5614. def set_gguf_parameters(self):
  5615. super().set_gguf_parameters()
  5616. self.gguf_writer.add_leading_dense_block_count(self.hparams["first_k_dense_replace"])
  5617. self.gguf_writer.add_expert_shared_count(self.hparams["n_shared_experts"])
  5618. self.gguf_writer.add_expert_weights_scale(self.hparams["routed_scaling_factor"])
  5619. self.gguf_writer.add_expert_weights_norm(self.hparams["norm_topk_prob"])
  5620. if self.hparams["scoring_func"] == "noaux_tc":
  5621. self.gguf_writer.add_expert_gating_func(gguf.ExpertGatingFuncType.SIGMOID)
  5622. else:
  5623. raise ValueError(f"Unsupported scoring_func value: {self.hparams['scoring_func']}")
  5624. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None):
  5625. if name.endswith("e_score_correction_bias"):
  5626. name = name.replace("e_score_correction_bias", "e_score_correction.bias")
  5627. if "shared_experts" in name:
  5628. return [(self.map_tensor_name(name), data_torch)]
  5629. return super().modify_tensors(data_torch, name, bid)
  5630. @ModelBase.register("PLMForCausalLM")
  5631. class PLMModel(TextModel):
  5632. model_arch = gguf.MODEL_ARCH.PLM
  5633. def set_vocab(self):
  5634. self._set_vocab_gpt2()
  5635. def set_gguf_parameters(self):
  5636. super().set_gguf_parameters()
  5637. hparams = self.hparams
  5638. self.gguf_writer.add_vocab_size(hparams["vocab_size"])
  5639. self.gguf_writer.add_kv_lora_rank(hparams["kv_lora_rank"])
  5640. self.gguf_writer.add_key_length(hparams["qk_nope_head_dim"] + hparams["qk_rope_head_dim"])
  5641. self.gguf_writer.add_value_length(hparams["v_head_dim"])
  5642. self.gguf_writer.add_rope_dimension_count(hparams["qk_rope_head_dim"])
  5643. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  5644. return [(self.map_tensor_name(name), data_torch)]
  5645. def prepare_tensors(self):
  5646. super().prepare_tensors()
  5647. @ModelBase.register("T5WithLMHeadModel")
  5648. @ModelBase.register("T5ForConditionalGeneration")
  5649. @ModelBase.register("MT5ForConditionalGeneration")
  5650. @ModelBase.register("UMT5ForConditionalGeneration")
  5651. class T5Model(TextModel):
  5652. model_arch = gguf.MODEL_ARCH.T5
  5653. def __init__(self, *args, **kwargs):
  5654. super().__init__(*args, **kwargs)
  5655. self.shared_token_embeddings_found = False
  5656. def set_vocab(self):
  5657. # to avoid TypeError: Descriptors cannot be created directly
  5658. # exception when importing sentencepiece_model_pb2
  5659. os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
  5660. from sentencepiece import SentencePieceProcessor
  5661. from sentencepiece import sentencepiece_model_pb2 as model
  5662. tokenizer_path = self.dir_model / 'tokenizer.model'
  5663. # many older models use spiece.model tokenizer model filename
  5664. if not tokenizer_path.is_file():
  5665. tokenizer_path = self.dir_model / 'spiece.model'
  5666. if not tokenizer_path.is_file():
  5667. raise FileNotFoundError(f"File not found: {tokenizer_path}")
  5668. sentencepiece_model = model.ModelProto() # pyright: ignore[reportAttributeAccessIssue]
  5669. sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read())
  5670. # some models like Pile-T5 family use BPE tokenizer instead of Unigram
  5671. if sentencepiece_model.trainer_spec.model_type == 2: # BPE
  5672. # assure the tokenizer model file name is correct
  5673. assert tokenizer_path.name == 'tokenizer.model'
  5674. return self._set_vocab_sentencepiece()
  5675. else:
  5676. assert sentencepiece_model.trainer_spec.model_type == 1 # UNIGRAM
  5677. add_prefix = sentencepiece_model.normalizer_spec.add_dummy_prefix
  5678. remove_whitespaces = sentencepiece_model.normalizer_spec.remove_extra_whitespaces
  5679. precompiled_charsmap = sentencepiece_model.normalizer_spec.precompiled_charsmap
  5680. tokenizer = SentencePieceProcessor()
  5681. tokenizer.LoadFromFile(str(tokenizer_path))
  5682. vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())
  5683. tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
  5684. scores: list[float] = [-10000.0] * vocab_size
  5685. toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size
  5686. for token_id in range(tokenizer.vocab_size()):
  5687. piece = tokenizer.IdToPiece(token_id)
  5688. text = piece.encode("utf-8")
  5689. score = tokenizer.GetScore(token_id)
  5690. toktype = SentencePieceTokenTypes.NORMAL
  5691. if tokenizer.IsUnknown(token_id):
  5692. toktype = SentencePieceTokenTypes.UNKNOWN
  5693. elif tokenizer.IsControl(token_id):
  5694. toktype = SentencePieceTokenTypes.CONTROL
  5695. elif tokenizer.IsUnused(token_id):
  5696. toktype = SentencePieceTokenTypes.UNUSED
  5697. elif tokenizer.IsByte(token_id):
  5698. toktype = SentencePieceTokenTypes.BYTE
  5699. tokens[token_id] = text
  5700. scores[token_id] = score
  5701. toktypes[token_id] = toktype
  5702. added_tokens_file = self.dir_model / 'added_tokens.json'
  5703. if added_tokens_file.is_file():
  5704. with open(added_tokens_file, "r", encoding="utf-8") as f:
  5705. added_tokens_json = json.load(f)
  5706. for key in added_tokens_json:
  5707. token_id = added_tokens_json[key]
  5708. if token_id >= vocab_size:
  5709. logger.warning(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
  5710. continue
  5711. tokens[token_id] = key.encode("utf-8")
  5712. scores[token_id] = -1000.0
  5713. toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
  5714. if vocab_size > len(tokens):
  5715. pad_count = vocab_size - len(tokens)
  5716. logger.debug(f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]")
  5717. for i in range(1, pad_count + 1):
  5718. tokens.append(bytes(f"[PAD{i}]", encoding="utf-8"))
  5719. scores.append(-1000.0)
  5720. toktypes.append(SentencePieceTokenTypes.UNUSED)
  5721. self.gguf_writer.add_tokenizer_model("t5")
  5722. self.gguf_writer.add_tokenizer_pre("default")
  5723. self.gguf_writer.add_token_list(tokens)
  5724. self.gguf_writer.add_token_scores(scores)
  5725. self.gguf_writer.add_token_types(toktypes)
  5726. self.gguf_writer.add_add_space_prefix(add_prefix)
  5727. self.gguf_writer.add_remove_extra_whitespaces(remove_whitespaces)
  5728. if precompiled_charsmap:
  5729. self.gguf_writer.add_precompiled_charsmap(precompiled_charsmap)
  5730. special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
  5731. special_vocab.add_to_gguf(self.gguf_writer)
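
    # Note (added for clarity, not in the original): trainer_spec.model_type is
    # SentencePiece's ModelType enum, where 1 = UNIGRAM and 2 = BPE (3 = WORD
    # and 4 = CHAR are unused here), hence the two literals checked above.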

    def set_gguf_parameters(self):
        if (n_ctx := self.find_hparam(["n_positions"], optional=True)) is None:
            logger.warning("Couldn't find context length in config.json, assuming default value of 512")
            n_ctx = 512
        self.gguf_writer.add_context_length(n_ctx)
        self.gguf_writer.add_embedding_length(self.hparams["d_model"])
        self.gguf_writer.add_feed_forward_length(self.hparams["d_ff"])
        self.gguf_writer.add_block_count(self.hparams["num_layers"])
        if (dec_n_layer := self.hparams.get("num_decoder_layers")) is not None:
            self.gguf_writer.add_decoder_block_count(dec_n_layer)
        self.gguf_writer.add_head_count(self.hparams["num_heads"])
        self.gguf_writer.add_key_length(self.hparams["d_kv"])
        self.gguf_writer.add_value_length(self.hparams["d_kv"])
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_relative_attn_buckets_count(self.hparams["relative_attention_num_buckets"])
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_decoder_start_token_id(self.hparams["decoder_start_token_id"])
        self.gguf_writer.add_file_type(self.ftype)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        # T5-based models may store the shared token embeddings tensor under any of "encoder.embed_tokens.weight",
        # "decoder.embed_tokens.weight" or "shared.weight", and some checkpoints even contain more than one copy
        # in the safetensors files. Use the first of these three that appears as the token embeddings for both
        # encoder and decoder, and ignore the remaining ones.
        if name in ["decoder.embed_tokens.weight", "encoder.embed_tokens.weight", "shared.weight"]:
            if not self.shared_token_embeddings_found:
                name = "shared.weight"
                self.shared_token_embeddings_found = True
            else:
                logger.debug(f"Skipping shared tensor {name!r} in safetensors so that convert can end normally.")
                return []

        return [(self.map_tensor_name(name), data_torch)]
  5764. @ModelBase.register("T5EncoderModel")
  5765. class T5EncoderModel(TextModel):
  5766. model_arch = gguf.MODEL_ARCH.T5ENCODER
  5767. def __init__(self, *args, **kwargs):
  5768. super().__init__(*args, **kwargs)
  5769. self.shared_token_embeddings_found = False
  5770. def set_vocab(self):
  5771. # to avoid TypeError: Descriptors cannot be created directly
  5772. # exception when importing sentencepiece_model_pb2
  5773. os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
  5774. from sentencepiece import SentencePieceProcessor
  5775. from sentencepiece import sentencepiece_model_pb2 as model
  5776. tokenizer_path = self.dir_model / 'tokenizer.model'
  5777. # many older models use spiece.model tokenizer model filename
  5778. if not tokenizer_path.is_file():
  5779. tokenizer_path = self.dir_model / 'spiece.model'
  5780. if not tokenizer_path.is_file():
  5781. raise FileNotFoundError(f"File not found: {tokenizer_path}")
  5782. sentencepiece_model = model.ModelProto() # pyright: ignore[reportAttributeAccessIssue]
  5783. sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read())
  5784. # some models like Pile-T5 family use BPE tokenizer instead of Unigram
  5785. if sentencepiece_model.trainer_spec.model_type == 2: # BPE
  5786. # assure the tokenizer model file name is correct
  5787. assert tokenizer_path.name == 'tokenizer.model'
  5788. return self._set_vocab_sentencepiece()
  5789. else:
  5790. assert sentencepiece_model.trainer_spec.model_type == 1 # UNIGRAM
  5791. add_prefix = sentencepiece_model.normalizer_spec.add_dummy_prefix
  5792. remove_whitespaces = sentencepiece_model.normalizer_spec.remove_extra_whitespaces
  5793. precompiled_charsmap = sentencepiece_model.normalizer_spec.precompiled_charsmap
  5794. tokenizer = SentencePieceProcessor()
  5795. tokenizer.LoadFromFile(str(tokenizer_path))
  5796. vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())
  5797. tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
  5798. scores: list[float] = [-10000.0] * vocab_size
  5799. toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size
  5800. for token_id in range(tokenizer.vocab_size()):
  5801. piece = tokenizer.IdToPiece(token_id)
  5802. text = piece.encode("utf-8")
  5803. score = tokenizer.GetScore(token_id)
  5804. toktype = SentencePieceTokenTypes.NORMAL
  5805. if tokenizer.IsUnknown(token_id):
  5806. toktype = SentencePieceTokenTypes.UNKNOWN
  5807. elif tokenizer.IsControl(token_id):
  5808. toktype = SentencePieceTokenTypes.CONTROL
  5809. elif tokenizer.IsUnused(token_id):
  5810. toktype = SentencePieceTokenTypes.UNUSED
  5811. elif tokenizer.IsByte(token_id):
  5812. toktype = SentencePieceTokenTypes.BYTE
  5813. tokens[token_id] = text
  5814. scores[token_id] = score
  5815. toktypes[token_id] = toktype
  5816. added_tokens_file = self.dir_model / 'added_tokens.json'
  5817. if added_tokens_file.is_file():
  5818. with open(added_tokens_file, "r", encoding="utf-8") as f:
  5819. added_tokens_json = json.load(f)
  5820. for key in added_tokens_json:
  5821. token_id = added_tokens_json[key]
  5822. if token_id >= vocab_size:
  5823. logger.warning(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
  5824. continue
  5825. tokens[token_id] = key.encode("utf-8")
  5826. scores[token_id] = -1000.0
  5827. toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
  5828. if vocab_size > len(tokens):
  5829. pad_count = vocab_size - len(tokens)
  5830. logger.debug(f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]")
  5831. for i in range(1, pad_count + 1):
  5832. tokens.append(bytes(f"[PAD{i}]", encoding="utf-8"))
  5833. scores.append(-1000.0)
  5834. toktypes.append(SentencePieceTokenTypes.UNUSED)
  5835. self.gguf_writer.add_tokenizer_model("t5")
  5836. self.gguf_writer.add_tokenizer_pre("default")
  5837. self.gguf_writer.add_token_list(tokens)
  5838. self.gguf_writer.add_token_scores(scores)
  5839. self.gguf_writer.add_token_types(toktypes)
  5840. self.gguf_writer.add_add_space_prefix(add_prefix)
  5841. self.gguf_writer.add_remove_extra_whitespaces(remove_whitespaces)
  5842. if precompiled_charsmap:
  5843. self.gguf_writer.add_precompiled_charsmap(precompiled_charsmap)
  5844. special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
  5845. special_vocab.add_to_gguf(self.gguf_writer)
  5846. def set_gguf_parameters(self):
  5847. if (n_ctx := self.find_hparam(["n_positions"], optional=True)) is None:
  5848. logger.warning("Couldn't find context length in config.json, assuming default value of 512")
  5849. n_ctx = 512
  5850. self.gguf_writer.add_context_length(n_ctx)
  5851. self.gguf_writer.add_embedding_length(self.hparams["d_model"])
  5852. self.gguf_writer.add_feed_forward_length(self.hparams["d_ff"])
  5853. self.gguf_writer.add_block_count(self.hparams["num_layers"])
  5854. self.gguf_writer.add_head_count(self.hparams["num_heads"])
  5855. self.gguf_writer.add_key_length(self.hparams["d_kv"])
  5856. self.gguf_writer.add_value_length(self.hparams["d_kv"])
  5857. self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
  5858. self.gguf_writer.add_relative_attn_buckets_count(self.hparams["relative_attention_num_buckets"])
  5859. self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"])
  5860. self.gguf_writer.add_file_type(self.ftype)
  5861. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  5862. del bid # unused
  5863. # T5 based models contain shared token embeddings tensors saved randomly as either "encoder.embed_tokens.weight",
  5864. # "decoder.embed_tokens.weight" or "shared.weight" tensor. In some models there are even multiple of them stored
  5865. # in the safetensors files. We use the first tensor from these three as the token embeddings for both encoder
  5866. # and decoder and ignore the remaining ones.
  5867. if name in ["decoder.embed_tokens.weight", "encoder.embed_tokens.weight", "shared.weight"]:
  5868. if not self.shared_token_embeddings_found:
  5869. name = "shared.weight"
  5870. self.shared_token_embeddings_found = True
  5871. else:
  5872. logger.debug(f"Skipping shared tensor {name!r} in safetensors so that convert can end normally.")
  5873. return []
  5874. return [(self.map_tensor_name(name), data_torch)]
  5875. @ModelBase.register("JAISLMHeadModel")
  5876. class JaisModel(TextModel):
  5877. model_arch = gguf.MODEL_ARCH.JAIS
  5878. def __init__(self, *args, **kwargs):
  5879. super().__init__(*args, **kwargs)
  5880. # SwigLU activation
  5881. assert self.hparams["activation_function"] == "swiglu"
  5882. # ALiBi position embedding
  5883. assert self.hparams["position_embedding_type"] == "alibi"
  5884. # Embeddings scale
  5885. self.embeddings_scale = 1.0
  5886. if 'mup_embeddings_scale' in self.hparams:
  5887. self.embeddings_scale = self.hparams['mup_embeddings_scale']
  5888. elif 'embeddings_scale' in self.hparams:
  5889. self.embeddings_scale = self.hparams['embeddings_scale']
  5890. else:
  5891. assert False
  5892. self.width_scale = 1.0
  5893. if 'mup_output_alpha' in self.hparams:
  5894. assert 'mup_width_scale' in self.hparams
  5895. self.width_scale = self.hparams['mup_output_alpha'] * self.hparams['mup_width_scale']
  5896. elif 'width_scale' in self.hparams:
  5897. self.width_scale = self.hparams['width_scale']
  5898. else:
  5899. assert False
  5900. self.max_alibi_bias = 8.0
  5901. def set_vocab(self):
  5902. self._set_vocab_gpt2()
  5903. def set_gguf_parameters(self):
  5904. self.gguf_writer.add_block_count(self.hparams["n_layer"])
  5905. self.gguf_writer.add_context_length(self.hparams["n_positions"])
  5906. self.gguf_writer.add_embedding_length(self.hparams["n_embd"])
  5907. self.gguf_writer.add_feed_forward_length(self.hparams["n_inner"])
  5908. self.gguf_writer.add_head_count(self.hparams["n_head"])
  5909. self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
  5910. self.gguf_writer.add_file_type(self.ftype)
  5911. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  5912. del bid # unused
  5913. tensors: list[tuple[str, Tensor]] = []
  5914. # we don't need these
  5915. if name.endswith((".attn.bias")):
  5916. return tensors
  5917. if name.endswith(("relative_pe.slopes")):
  5918. # Calculate max ALiBi bias (this is the inverse of the ALiBi calculation)
  5919. # Some other models has max_alibi_bias spelled out explicitly in the hyperparams,
  5920. # but Jais's PyTorch model simply precalculates the slope values and places them
  5921. # in relative_pes.slopes
  5922. n_head_closest_log2 = 2 ** math.floor(math.log2(self.hparams["n_head"]))
  5923. first_val = float(data_torch[0].item())
  5924. self.max_alibi_bias = -round(math.log2(first_val) * n_head_closest_log2)
  5925. return tensors
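
        # Worked example (added for clarity, not in the original): for n_head = 32,
        # the standard ALiBi slopes start at 2**(-8/32), so the computation above
        # gives -round(math.log2(2**(-8/32)) * 32) = -round(-0.25 * 32) = 8,
        # recovering the usual max_alibi_bias of 8 from the first slope value.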
  5926. if name.endswith((".c_attn.weight", ".c_proj.weight", ".c_fc.weight", ".c_fc2.weight")):
  5927. data_torch = data_torch.transpose(1, 0)
  5928. new_name = self.map_tensor_name(name)
  5929. if new_name == self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD):
  5930. tensors.append((new_name, data_torch * self.embeddings_scale))
  5931. elif new_name == self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT):
  5932. tensors.append((new_name, data_torch * self.width_scale))
  5933. else:
  5934. tensors.append((new_name, data_torch))
  5935. return tensors
  5936. def prepare_tensors(self):
  5937. super().prepare_tensors()
  5938. self.gguf_writer.add_max_alibi_bias(self.max_alibi_bias)
  5939. @ModelBase.register("Glm4ForCausalLM", "Glm4vForConditionalGeneration")
  5940. class Glm4Model(TextModel):
  5941. model_arch = gguf.MODEL_ARCH.GLM4
  5942. def set_vocab(self):
  5943. from transformers import AutoTokenizer
  5944. tokenizer = AutoTokenizer.from_pretrained(self.dir_model, trust_remote_code=True)
  5945. special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=True)
  5946. tokens, toktypes, tokpre = self.get_vocab_base()
  5947. self.gguf_writer.add_tokenizer_model("gpt2")
  5948. self.gguf_writer.add_tokenizer_pre(tokpre)
  5949. self.gguf_writer.add_token_list(tokens)
  5950. self.gguf_writer.add_token_types(toktypes)
  5951. special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=True)
  5952. special_vocab._set_special_token("eos", tokenizer.get_added_vocab()["<|endoftext|>"])
  5953. special_vocab._set_special_token("eot", tokenizer.get_added_vocab()["<|user|>"])
  5954. special_vocab._set_special_token("unk", tokenizer.get_added_vocab()["<|endoftext|>"])
  5955. special_vocab._set_special_token("bos", tokenizer.get_added_vocab()["<|endoftext|>"])
  5956. special_vocab.add_to_gguf(self.gguf_writer)
  5957. def set_gguf_parameters(self):
  5958. super().set_gguf_parameters()
  5959. if (rope_dim := self.hparams.get("head_dim")) is None:
  5960. rope_dim = self.hparams["hidden_size"] // self.hparams["num_attention_heads"]
  5961. self.gguf_writer.add_rope_dimension_count(int(rope_dim * self.hparams.get("partial_rotary_factor", 0.5)))
  5962. rope_scaling = self.hparams.get("rope_scaling") or {}
  5963. if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling:
  5964. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
  5965. self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
  5966. self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"])
  5967. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  5968. if name.startswith("model.visual."): # ignore visual part of Glm4v
  5969. return []
  5970. elif name.startswith("model.language_model."):
  5971. name = name.replace("language_model.", "") # for Glm4v
  5972. return super().modify_tensors(data_torch, name, bid)
  5973. @ModelBase.register("Glm4MoeForCausalLM")
  5974. class Glm4MoeModel(TextModel):
  5975. model_arch = gguf.MODEL_ARCH.GLM4_MOE
  5976. def __init__(self, *args, **kwargs):
  5977. super().__init__(*args, **kwargs)
  5978. # GLM4_MOE has num_hidden_layers + 1 actual layers (including NextN layer)
  5979. self.block_count = self.hparams["num_hidden_layers"] + self.hparams.get("num_nextn_predict_layers", 0)
  5980. self.tensor_map = gguf.get_tensor_name_map(self.model_arch, self.block_count)
  5981. def set_vocab(self):
  5982. from transformers import AutoTokenizer
  5983. tokenizer = AutoTokenizer.from_pretrained(self.dir_model)
  5984. special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=True)
  5985. tokens, toktypes, tokpre = self.get_vocab_base()
  5986. self.gguf_writer.add_tokenizer_model("gpt2")
  5987. self.gguf_writer.add_tokenizer_pre(tokpre)
  5988. self.gguf_writer.add_token_list(tokens)
  5989. self.gguf_writer.add_token_types(toktypes)
  5990. # Special tokens
  5991. # Note: Using <|endoftext|> (151329) for eot causes endless generation
  5992. special_vocab._set_special_token("bos", tokenizer.get_added_vocab()["[gMASK]"]) # 151331
  5993. special_vocab._set_special_token("eot", tokenizer.get_added_vocab()["<|user|>"]) # 151336
  5994. special_vocab._set_special_token("unk", tokenizer.get_added_vocab()["<|endoftext|>"]) # 151329
  5995. special_vocab._set_special_token("eom", tokenizer.get_added_vocab()["<|observation|>"]) # 151338
  5996. # Patch broken chat template
  5997. if isinstance(special_vocab.chat_template, str) and "visible_text(m.content).endswith" in special_vocab.chat_template:
  5998. special_vocab.chat_template = special_vocab.chat_template.replace(
  5999. """{{ visible_text(m.content) }}\n{{- '/nothink' if (enable_thinking is defined and not enable_thinking and not visible_text(m.content).endswith("/nothink")) else '' -}}""",
  6000. """{% set content = visible_text(m.content) %}{{ content }}\n{{- '/nothink' if (enable_thinking is defined and not enable_thinking and not content.endswith("/nothink")) else '' -}}""")
  6001. special_vocab.add_to_gguf(self.gguf_writer)
  6002. def set_gguf_parameters(self):
  6003. super().set_gguf_parameters()
  6004. if (rope_dim := self.hparams.get("head_dim")) is None:
  6005. rope_dim = (
  6006. self.hparams["hidden_size"] // self.hparams["num_attention_heads"]
  6007. )
  6008. self.gguf_writer.add_rope_dimension_count(
  6009. int(rope_dim * self.hparams.get("partial_rotary_factor", 0.5))
  6010. )
  6011. # MoE parameters - Use only routed expert count (shared experts handled separately)
  6012. if (n_routed_experts := self.hparams.get("n_routed_experts")) is not None:
  6013. self.gguf_writer.add_expert_count(n_routed_experts)
  6014. if (moe_intermediate_size := self.hparams.get("moe_intermediate_size")) is not None:
  6015. self.gguf_writer.add_expert_feed_forward_length(moe_intermediate_size)
  6016. if (n_shared_experts := self.hparams.get("n_shared_experts")) is not None:
  6017. self.gguf_writer.add_expert_shared_count(n_shared_experts)
  6018. if (first_k_dense_replace := self.hparams.get("first_k_dense_replace")) is not None:
  6019. self.gguf_writer.add_leading_dense_block_count(first_k_dense_replace)
  6020. # Expert gating function (sigmoid for GLM4_MOE)
  6021. self.gguf_writer.add_expert_gating_func(gguf.ExpertGatingFuncType.SIGMOID)
  6022. # Routed scaling factor
  6023. if (routed_scaling_factor := self.hparams.get("routed_scaling_factor")) is not None:
  6024. self.gguf_writer.add_expert_weights_scale(routed_scaling_factor)
  6025. # Normalise topk probabilities
  6026. if (norm_topk_prob := self.hparams.get("norm_topk_prob")) is not None:
  6027. self.gguf_writer.add_expert_weights_norm(norm_topk_prob)
  6028. # NextN/MTP prediction layers
  6029. if (num_nextn_predict_layers := self.hparams.get("num_nextn_predict_layers")) is not None:
  6030. self.gguf_writer.add_nextn_predict_layers(num_nextn_predict_layers)
  6031. _experts: list[dict[str, Tensor]] | None = None
  6032. def modify_tensors(
  6033. self, data_torch: Tensor, name: str, bid: int | None
  6034. ) -> Iterable[tuple[str, Tensor]]:
  6035. if name.startswith("model.visual."): # ignore visual part
  6036. return []
  6037. elif name.startswith("model.language_model."):
  6038. name = name.replace("language_model.", "") # for multimodal variants
  6039. # Handle main token embedding (but not layer-specific NextN embeddings)
  6040. if name == "model.embed_tokens.weight" and ".layers." not in name:
  6041. return [(self.map_tensor_name("token_embd.weight"), data_torch)]
  6042. # Handle routed experts
  6043. if name.find("mlp.experts") != -1:
  6044. n_experts = self.hparams["n_routed_experts"]
  6045. assert bid is not None
  6046. if self._experts is None:
  6047. self._experts = [{} for _ in range(self.block_count)]
  6048. self._experts[bid][name] = data_torch
  6049. if len(self._experts[bid]) >= n_experts * 3:
  6050. tensors: list[tuple[str, Tensor]] = []
  6051. # merge the experts into a single 3d tensor
  6052. for w_name in ["down_proj", "gate_proj", "up_proj"]:
  6053. datas: list[Tensor] = []
  6054. for xid in range(n_experts):
  6055. ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
  6056. datas.append(self._experts[bid][ename])
  6057. del self._experts[bid][ename]
  6058. data_torch = torch.stack(datas, dim=0)
  6059. merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"
  6060. new_name = self.map_tensor_name(merged_name)
  6061. tensors.append((new_name, data_torch))
  6062. return tensors
  6063. else:
  6064. return []
  6065. if name.endswith("e_score_correction_bias"):
  6066. name = name.replace("e_score_correction_bias", "e_score_correction.bias")
  6067. new_name = self.map_tensor_name(name)
  6068. return [(new_name, data_torch)]
  6069. def prepare_tensors(self):
  6070. super().prepare_tensors()
  6071. if self._experts is not None:
  6072. # flatten `list[dict[str, Tensor]]` into `list[str]`
  6073. experts = [k for d in self._experts for k in d.keys()]
  6074. if len(experts) > 0:
  6075. raise ValueError(f"Unprocessed experts: {experts}")
  6076. @ModelBase.register("GlmForCausalLM", "ChatGLMModel", "ChatGLMForConditionalGeneration")
  6077. class ChatGLMModel(TextModel):
  6078. model_arch = gguf.MODEL_ARCH.CHATGLM
  6079. def set_vocab_chatglm3(self):
  6080. dir_model = self.dir_model
  6081. hparams = self.hparams
  6082. tokens: list[bytes] = []
  6083. toktypes: list[int] = []
  6084. scores: list[float] = []
  6085. from transformers import AutoTokenizer
  6086. tokenizer = AutoTokenizer.from_pretrained(dir_model, trust_remote_code=True)
  6087. vocab_size = hparams.get("padded_vocab_size", len(tokenizer.get_vocab()))
  6088. assert max(tokenizer.get_vocab().values()) < vocab_size
  6089. role_special_tokens = ["<|system|>", "<|user|>", "<|assistant|>", "<|observation|>"]
  6090. special_tokens = ["[MASK]", "[gMASK]", "[sMASK]", "sop", "eop"] + role_special_tokens
  6091. for token_id in range(vocab_size):
  6092. piece = tokenizer._convert_id_to_token(token_id)
  6093. if token_id == 0:
  6094. piece = "<unk>"
  6095. elif token_id == 1:
  6096. piece = "<bos>"
  6097. elif token_id == 2:
  6098. piece = "<eos>"
  6099. text = piece.encode("utf-8")
  6100. score = 0.0
  6101. # Referencing the tokenizer Python implementation(https://huggingface.co/THUDM/chatglm3-6b/blob/main/tokenization_chatglm.py),
  6102. # it is only valid if it is less than tokenizer.tokenizer.sp_model.vocab_size()
  6103. if len(piece) != 0 and token_id < tokenizer.tokenizer.sp_model.vocab_size():
  6104. score = tokenizer.tokenizer.sp_model.get_score(token_id)
  6105. if token_id >= tokenizer.tokenizer.sp_model.vocab_size():
  6106. if piece in special_tokens:
  6107. toktype = SentencePieceTokenTypes.CONTROL
  6108. elif len(piece) == 0:
  6109. text = f"[PAD{token_id}]".encode("utf-8")
  6110. toktype = SentencePieceTokenTypes.UNUSED
  6111. else:
  6112. toktype = SentencePieceTokenTypes.USER_DEFINED
  6113. tokens.append(text)
  6114. scores.append(score)
  6115. toktypes.append(toktype)
  6116. continue
  6117. toktype = SentencePieceTokenTypes.NORMAL
  6118. if tokenizer.tokenizer.sp_model.is_unknown(token_id):
  6119. toktype = SentencePieceTokenTypes.UNKNOWN
  6120. elif tokenizer.tokenizer.sp_model.is_control(token_id):
  6121. toktype = SentencePieceTokenTypes.CONTROL
  6122. elif tokenizer.tokenizer.sp_model.is_unused(token_id):
  6123. toktype = SentencePieceTokenTypes.UNUSED
  6124. elif tokenizer.tokenizer.sp_model.is_byte(token_id):
  6125. toktype = SentencePieceTokenTypes.BYTE
  6126. tokens.append(text)
  6127. scores.append(score)
  6128. toktypes.append(toktype)
  6129. self.gguf_writer.add_tokenizer_model("llama")
  6130. # glm3 needs prefix and suffix formatted as:
  6131. # prompt = "[gMASK]sop<|user|>\n" + prompt + "<|assistant|>"
  6132. self.gguf_writer.add_tokenizer_pre("chatglm-spm")
  6133. self.gguf_writer.add_token_list(tokens)
  6134. self.gguf_writer.add_token_scores(scores)
  6135. self.gguf_writer.add_token_types(toktypes)
  6136. special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
  6137. special_vocab.add_to_gguf(self.gguf_writer)
  6138. @staticmethod
  6139. def token_bytes_to_string(b):
  6140. from transformers.models.gpt2.tokenization_gpt2 import bytes_to_unicode
  6141. byte_encoder = bytes_to_unicode()
  6142. return ''.join([byte_encoder[ord(char)] for char in b.decode('latin-1')])
  6143. @staticmethod
  6144. def bpe(mergeable_ranks: dict[bytes, int], token: bytes, max_rank: int | None = None) -> list[bytes]:
  6145. parts = [bytes([b]) for b in token]
  6146. while True:
  6147. min_idx = None
  6148. min_rank = None
  6149. for i, pair in enumerate(zip(parts[:-1], parts[1:])):
  6150. rank = mergeable_ranks.get(pair[0] + pair[1])
  6151. if rank is not None and (min_rank is None or rank < min_rank):
  6152. min_idx = i
  6153. min_rank = rank
  6154. if min_rank is None or (max_rank is not None and min_rank >= max_rank):
  6155. break
  6156. assert min_idx is not None
  6157. parts = parts[:min_idx] + [parts[min_idx] + parts[min_idx + 1]] + parts[min_idx + 2:]
  6158. return parts
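
    # Usage sketch (added for clarity, not in the original): calling
    # bpe(mergeable_ranks, token, max_rank=rank) replays all merges with rank
    # strictly below `rank`, so a token that was itself created by the merge of
    # rank `rank` comes back as exactly the two parts that produced it. This is
    # how a BPE merges list can be reconstructed from a rank table alone.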

    def set_vocab(self):
        if "THUDM/chatglm3-6b" in self.hparams.get("_name_or_path", ""):
            self.set_vocab_chatglm3()
            return

        dir_model = self.dir_model
        hparams = self.hparams
        tokens: list[str] = []
        toktypes: list[int] = []

        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(dir_model, trust_remote_code=True)
        vocab_size = hparams.get("padded_vocab_size", hparams["vocab_size"])
        assert max(tokenizer.get_vocab().values()) < vocab_size

        tokens, toktypes, tokpre = self.get_vocab_base()
        self.gguf_writer.add_tokenizer_model("gpt2")
        self.gguf_writer.add_tokenizer_pre(tokpre)
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)
        special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=True)
        # only add special tokens when they were not already loaded from config.json
        special_vocab._set_special_token("eos", tokenizer.get_added_vocab()["<|endoftext|>"])
        special_vocab._set_special_token("eot", tokenizer.get_added_vocab()["<|user|>"])
        # this one is usually not in config.json anyway
        special_vocab._set_special_token("unk", tokenizer.get_added_vocab()["<|endoftext|>"])
        special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed"))
        n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads"))
        n_head_kv = self.hparams.get("multi_query_group_num", self.hparams.get("num_key_value_heads", n_head))
        self.gguf_writer.add_context_length(self.hparams.get("seq_length", n_embed))
        self.gguf_writer.add_embedding_length(n_embed)
        self.gguf_writer.add_feed_forward_length(self.hparams.get("ffn_hidden_size", self.hparams.get("intermediate_size", 4 * n_embed)))
        self.gguf_writer.add_block_count(self.hparams.get("num_layers", self.hparams["num_hidden_layers"]))
        self.gguf_writer.add_head_count(n_head)
        self.gguf_writer.add_head_count_kv(n_head_kv)
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams.get("layernorm_epsilon", 1e-5))
        self.gguf_writer.add_file_type(self.ftype)
        if "attention_dim" in self.hparams:
            rope_dim = self.hparams["attention_dim"]
        else:
            rope_dim = self.hparams["hidden_size"] // self.hparams["num_attention_heads"]
        self.gguf_writer.add_rope_dimension_count(int(rope_dim * self.hparams.get("partial_rotary_factor", 0.5)))
        self.gguf_writer.add_add_bos_token(False)
        rope_freq = 10000
        if "rope_ratio" in self.hparams:
            rope_freq = rope_freq * self.hparams["rope_ratio"]
        self.gguf_writer.add_rope_freq_base(rope_freq)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused
        if name.endswith(".rotary_pos_emb.inv_freq") or name.startswith("model.vision."):
            return []

        name = name.removeprefix("transformer.")
        return [(self.map_tensor_name(name), data_torch)]
  6211. @ModelBase.register("NemotronForCausalLM")
  6212. class NemotronModel(TextModel):
  6213. model_arch = gguf.MODEL_ARCH.NEMOTRON
  6214. def set_vocab(self):
  6215. self._set_vocab_sentencepiece()
  6216. self.gguf_writer.add_pad_token_id(0)
  6217. self.gguf_writer.add_unk_token_id(1)
  6218. def set_gguf_parameters(self):
  6219. super().set_gguf_parameters()
  6220. hparams = self.hparams
  6221. self.gguf_writer.add_vocab_size(hparams["vocab_size"])
  6222. f_norm_eps = self.find_hparam(["layer_norm_eps", "layer_norm_epsilon", "norm_epsilon", "norm_eps"])
  6223. self.gguf_writer.add_layer_norm_eps(f_norm_eps)
  6224. # * Partial RoPE
  6225. rot_pct = self.find_hparam(["partial_rotary_factor", "rope_pct", "rope_percent"])
  6226. n_embd = self.find_hparam(["hidden_size", "n_embd"])
  6227. n_head = self.find_hparam(["num_attention_heads", "n_head"])
  6228. self.gguf_writer.add_rope_dimension_count(int(rot_pct * n_embd) // n_head)
  6229. # * RopeScaling for Nemotron
  6230. if "rope_scaling" not in self.hparams or self.hparams["rope_scaling"] is None:
  6231. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)
  6232. else:
  6233. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
  6234. self.gguf_writer.add_rope_scaling_factor(self.hparams["factor"])
  6235. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  6236. # * Adding +1 to LayerNorm's weights here to implement layernorm1p w/o changing anything on the GGML engine side
  6237. # model.layers.{l}.input_layernorm.weight
  6238. # model.layers.{l}.post_attention_layernorm.weight
  6239. # model.norm.weight
  6240. if name.endswith("norm.weight"):
  6241. data_torch = data_torch + 1
  6242. return [(self.map_tensor_name(name), data_torch)]
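
    # Note (added for clarity, not in the original): layernorm1p computes
    # x_hat * (1 + w) + b instead of the standard x_hat * w + b. Baking the +1
    # into the stored weight means the GGML side can run its ordinary LayerNorm
    # kernel with w' = w + 1 and produce numerically identical results, which
    # is why only the converter needs to change.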
  6243. @ModelBase.register("ExaoneForCausalLM")
  6244. class ExaoneModel(TextModel):
  6245. model_arch = gguf.MODEL_ARCH.EXAONE
  6246. def set_gguf_parameters(self):
  6247. hparams = self.hparams
  6248. assert (hparams["activation_function"] == "silu")
  6249. max_position_embeddings = hparams["max_position_embeddings"]
  6250. embed_dim = hparams["hidden_size"]
  6251. num_heads = hparams["num_attention_heads"]
  6252. num_kv_heads = hparams.get("num_key_value_heads", num_heads)
  6253. layer_norm_eps = hparams["layer_norm_epsilon"]
  6254. intermediate_size = hparams["intermediate_size"] if "intermediate_size" in hparams else 4 * embed_dim
  6255. num_layers = hparams["num_layers"]
  6256. # ignore for now as EXAONE-3.0-7.8B-Instruct attentino_dropout is 0.0
  6257. # attention_dropout_rate = hparams["attention_dropout"]
  6258. # ignore for now as EXAONE-3.0-7.8B-Instruct embed_dropout is 0.0
  6259. # embed_dropout_rate = hparams["embed_dropout"]
  6260. self.gguf_writer.add_embedding_length(embed_dim)
  6261. self.gguf_writer.add_head_count(num_heads)
  6262. self.gguf_writer.add_head_count_kv(num_kv_heads)
  6263. self.gguf_writer.add_context_length(max_position_embeddings)
  6264. self.gguf_writer.add_layer_norm_rms_eps(layer_norm_eps)
  6265. self.gguf_writer.add_feed_forward_length(intermediate_size)
  6266. self.gguf_writer.add_block_count(num_layers)
  6267. self.gguf_writer.add_file_type(self.ftype)
  6268. if (rope_theta := self.hparams.get("rope_theta")) is not None:
  6269. self.gguf_writer.add_rope_freq_base(rope_theta)
  6270. rotary_factor = self.find_hparam(["partial_rotary_factor", "rope_pct"], optional=True)
  6271. rotary_factor = rotary_factor if rotary_factor is not None else 1.0
  6272. self.gguf_writer.add_rope_dimension_count(int(rotary_factor * (hparams["hidden_size"] // hparams["num_attention_heads"])))
  6273. rope_scaling = self.hparams.get("rope_scaling") or {}
  6274. if rope_scaling.get("rope_type", rope_scaling.get("type")) == "linear" and "factor" in rope_scaling:
  6275. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
  6276. self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
  6277. def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
  6278. if rope_scaling := self.find_hparam(["rope_scaling"], optional=True):
  6279. if rope_scaling.get("rope_type", '').lower() == "llama3":
  6280. base = self.hparams.get("rope_theta", 10000.0)
  6281. if (dim := self.hparams.get("head_dim")) is None:
  6282. dim = self.hparams["hidden_size"] // self.hparams["num_attention_heads"]
  6283. freqs = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim))
  6284. factor = rope_scaling.get("factor", 8.0)
  6285. low_freq_factor = rope_scaling.get("low_freq_factor", 1.0)
  6286. high_freq_factor = rope_scaling.get("high_freq_factor", 4.0)
  6287. old_context_len = self.hparams.get("original_max_position_embeddings", 8192)
  6288. low_freq_wavelen = old_context_len / low_freq_factor
  6289. high_freq_wavelen = old_context_len / high_freq_factor
  6290. assert low_freq_wavelen != high_freq_wavelen
  6291. rope_factors = []
  6292. for freq in freqs:
  6293. wavelen = 2 * math.pi / freq
  6294. if wavelen < high_freq_wavelen:
  6295. rope_factors.append(1)
  6296. elif wavelen > low_freq_wavelen:
  6297. rope_factors.append(factor)
  6298. else:
  6299. smooth = (old_context_len / wavelen - low_freq_factor) / (high_freq_factor - low_freq_factor)
  6300. rope_factors.append(1 / ((1 - smooth) / factor + smooth))
  6301. yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FREQS), torch.tensor(rope_factors, dtype=torch.float32))
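
    # Worked example (added for clarity, not in the original): with the default
    # llama3 scaling parameters above (factor=8, low_freq_factor=1,
    # high_freq_factor=4, old_context_len=8192), wavelengths shorter than
    # 8192/4 = 2048 positions are left untouched (factor 1), wavelengths longer
    # than 8192/1 = 8192 get the full factor 8, and the band in between is
    # interpolated smoothly so the per-frequency factors ramp from 1 to 8
    # without a discontinuity.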
  6302. @ModelBase.register("Exaone4ForCausalLM")
  6303. class Exaone4Model(TextModel):
  6304. model_arch = gguf.MODEL_ARCH.EXAONE4
  6305. def set_vocab(self):
  6306. tokens, toktypes, tokpre = self.get_vocab_base()
  6307. self.gguf_writer.add_tokenizer_model("gpt2")
  6308. self.gguf_writer.add_tokenizer_pre(tokpre)
  6309. self.gguf_writer.add_token_list(tokens)
  6310. self.gguf_writer.add_token_types(toktypes)
  6311. special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=True)
  6312. special_vocab.add_to_gguf(self.gguf_writer)
  6313. def set_gguf_parameters(self):
  6314. super().set_gguf_parameters()
  6315. hparams = self.hparams
  6316. self.gguf_writer.add_vocab_size(hparams["vocab_size"])
  6317. if hparams.get("sliding_window") is not None:
  6318. self.gguf_writer.add_sliding_window(hparams["sliding_window"])
  6319. if "layer_types" in hparams:
  6320. self.gguf_writer.add_sliding_window_pattern([t == "sliding_attention" for t in hparams["layer_types"]])
  6321. elif "sliding_window_pattern" in hparams:
  6322. sliding_window_pattern = []
  6323. if isinstance(hparams["sliding_window_pattern"], str): # e.g. LLLG
  6324. for i in range(hparams["num_hidden_layers"]):
  6325. sliding_window_pattern.append(hparams["sliding_window_pattern"][i % len(hparams["sliding_window_pattern"])] == "L")
  6326. if isinstance(hparams["sliding_window_pattern"], int): # e.g. 4
  6327. for i in range(hparams["num_hidden_layers"]):
  6328. sliding_window_pattern.append((i + 1) % hparams["sliding_window_pattern"] != 0)
  6329. if len(sliding_window_pattern) == hparams["num_hidden_layers"]:
  6330. self.gguf_writer.add_sliding_window_pattern(sliding_window_pattern)
  6331. rope_scaling = self.hparams.get("rope_scaling") or {}
  6332. if rope_scaling.get("rope_type", rope_scaling.get("type")) == "linear" and "factor" in rope_scaling:
  6333. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
  6334. self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
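
    # Worked example (added for clarity, not in the original): a string pattern
    # "LLLG" over 8 layers expands to [True, True, True, False, True, True,
    # True, False] (L marks a local/sliding layer), while an integer pattern 4
    # marks every 4th layer as global: (i + 1) % 4 != 0 yields the same
    # alternation of three sliding layers followed by one full-attention layer.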

    def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
        if rope_scaling := self.find_hparam(["rope_scaling"], optional=True):
            if rope_scaling.get("rope_type", '').lower() == "llama3":
                base = self.hparams.get("rope_theta", 10_000.0)
                if (dim := self.hparams.get("head_dim")) is None:
                    dim = self.hparams["hidden_size"] // self.hparams["num_attention_heads"]
                freqs = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim))

                factor = rope_scaling.get("factor", 16.0)
                low_freq_factor = rope_scaling.get("low_freq_factor", 1.0)
                high_freq_factor = rope_scaling.get("high_freq_factor", 4.0)
                old_context_len = self.hparams.get("original_max_position_embeddings", 8192)

                low_freq_wavelen = old_context_len / low_freq_factor
                high_freq_wavelen = old_context_len / high_freq_factor

                rope_factors = []
                for freq in freqs:
                    wavelen = 2 * math.pi / freq
                    if wavelen < high_freq_wavelen:
                        rope_factors.append(1)
                    elif wavelen > low_freq_wavelen:
                        rope_factors.append(factor)
                    else:
                        smooth = (old_context_len / wavelen - low_freq_factor) / (high_freq_factor - low_freq_factor)
                        rope_factors.append(1 / ((1 - smooth) / factor + smooth))

                yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FREQS), torch.tensor(rope_factors, dtype=torch.float32))
  6359. @ModelBase.register("GraniteForCausalLM")
  6360. class GraniteModel(LlamaModel):
  6361. """Conversion for IBM's GraniteForCausalLM"""
  6362. model_arch = gguf.MODEL_ARCH.GRANITE
  6363. def set_gguf_parameters(self):
  6364. """Granite uses standard llama parameters with the following differences:
  6365. - No head_dim support
  6366. - New multiplier params:
  6367. - attention_scale
  6368. - embedding_scale
  6369. - residual_scale
  6370. - logits_scaling
  6371. """
  6372. if head_dim := self.hparams.pop("head_dim", None):
  6373. logger.warning("Ignoring head_dim (%s) from config for Granite", head_dim)
  6374. super().set_gguf_parameters()
  6375. # NOTE: Convert _multiplier params to _scale params for naming
  6376. # consistency
  6377. if attention_scale := self.hparams.get("attention_multiplier"):
  6378. self.gguf_writer.add_attention_scale(attention_scale)
  6379. logger.info("gguf: (granite) attention_scale = %s", attention_scale)
  6380. if embedding_scale := self.hparams.get("embedding_multiplier"):
  6381. self.gguf_writer.add_embedding_scale(embedding_scale)
  6382. logger.info("gguf: (granite) embedding_scale = %s", embedding_scale)
  6383. if residual_scale := self.hparams.get("residual_multiplier"):
  6384. self.gguf_writer.add_residual_scale(residual_scale)
  6385. logger.info("gguf: (granite) residual_scale = %s", residual_scale)
  6386. if logits_scale := self.hparams.get("logits_scaling"):
  6387. self.gguf_writer.add_logit_scale(logits_scale)
  6388. logger.info("gguf: (granite) logits_scale = %s", logits_scale)
  6389. @ModelBase.register("GraniteMoeForCausalLM", "GraniteMoeSharedForCausalLM")
  6390. class GraniteMoeModel(GraniteModel):
  6391. """Conversion for IBM's GraniteMoeForCausalLM"""
  6392. model_arch = gguf.MODEL_ARCH.GRANITE_MOE
  6393. def set_gguf_parameters(self):
  6394. """GraniteMoeShared uses GraniteMoe parameters plus the following:
  6395. - shared_intermediate_size
  6396. """
  6397. super().set_gguf_parameters()
  6398. if shared_feed_forward_length := self.hparams.get("shared_intermediate_size"):
  6399. self.gguf_writer.add_expert_shared_feed_forward_length(shared_feed_forward_length)
  6400. logger.info("gguf: (granitemoeshared) shared_feed_forward_length = %s", shared_feed_forward_length)
  6401. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  6402. """In modeling_granitemoe, the JetMoe implementation of parallel experts
  6403. is used. This essentially merges w1 and w3 into a single tensor with 2x
  6404. the hidden size that is then split during forward. To keep compatibility
  6405. with existing mixtral support, we pull them apart here.
  6406. """
  6407. if name.endswith("block_sparse_moe.input_linear.weight"):
  6408. ffn_dim = self.hparams["intermediate_size"]
  6409. assert data_torch.shape[-2] == 2 * ffn_dim, "Merged FFN tensor size must be 2 * intermediate_size"
  6410. gate, up = data_torch.split(ffn_dim, dim=-2)
  6411. return [
  6412. (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE_EXP, bid), gate),
  6413. (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP_EXP, bid), up),
  6414. ]
  6415. has_experts = bool(self.hparams.get('num_local_experts'))
  6416. if name.endswith("shared_mlp.input_linear.weight"):
  6417. ffn_dim = self.hparams["shared_intermediate_size"]
  6418. assert data_torch.shape[-2] == 2 * ffn_dim, "Merged FFN tensor size must be 2 * shared_intermediate_size"
  6419. gate, up = data_torch.split(ffn_dim, dim=-2)
  6420. if has_experts:
  6421. return [
  6422. (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE_SHEXP, bid), gate),
  6423. (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP_SHEXP, bid), up),
  6424. ]
  6425. return [
  6426. (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE, bid), gate),
  6427. (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP, bid), up),
  6428. ]
  6429. if not has_experts and name.endswith("shared_mlp.output_linear.weight"):
  6430. return [
  6431. (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_DOWN, bid), data_torch)
  6432. ]
  6433. return super().modify_tensors(data_torch, name, bid)
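
    # Shape sketch (added for clarity, not in the original): input_linear.weight
    # stores the gate (w1) and up (w3) projections stacked along the feature
    # dimension, e.g. [..., 2 * ffn_dim, n_embd]. split(ffn_dim, dim=-2)
    # recovers two [..., ffn_dim, n_embd] tensors; per the assignments above,
    # the first half is taken as the gate projection and the second as up.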
  6434. @ModelBase.register("GraniteMoeHybridForCausalLM", "BambaForCausalLM")
  6435. class GraniteHybridModel(Mamba2Model, GraniteMoeModel):
  6436. """GraniteHybrid is a hybrid SSM + Attention model that uses Mamba2 SSM
  6437. layers and optionally uses MoE w/ a shared expert"""
  6438. model_arch = gguf.MODEL_ARCH.GRANITE_HYBRID
  6439. undo_permute = True
  6440. def __init__(self, *args, **kwargs):
  6441. # Hybrid mamba models use a prefix for the mamba-specific params.
  6442. # TODO: Extend this if the prefix(es) need to be configurable
  6443. self.hparam_prefixes = ["mamba"]
  6444. super().__init__(*args, **kwargs)
  6445. # Lists of which layers use ssm vs attention
  6446. self._attn_layers = self.get_attn_layers()
  6447. self._ssm_layers = [
  6448. i for i in range(self.block_count)
  6449. if i not in self._attn_layers
  6450. ]
  6451. # There are some models in this family that are non-hybrid, but keep the
  6452. # same parent class by setting all layers to "attention." If this is the
  6453. # case, the model architecture needs to be updated to a standard
  6454. # "granite" or "granitemoe" model
  6455. if not self._ssm_layers:
  6456. has_experts = self.find_hparam(["num_experts_per_tok"], optional=True)
  6457. new_arch = (
  6458. gguf.MODEL_ARCH.GRANITE_MOE
  6459. if has_experts else
  6460. gguf.MODEL_ARCH.GRANITE
  6461. )
  6462. self.model_arch = new_arch
  6463. self.gguf_writer.arch = gguf.MODEL_ARCH_NAMES[new_arch]
  6464. self.gguf_writer.add_architecture()
  6465. # n_group and d_inner are used during reshape_tensors for mamba2
  6466. # NOTE: Explicitly include hparam prefix prefix for d_model to
  6467. # disambiguate with top-level head_dim
  6468. # NOTE 2: If needed for future models, this can be isolated in a method
  6469. # to separate the prefix setting and teh keys used
  6470. self.d_model = self.find_hparam([f"{self.hparam_prefixes[0]}_head_dim", "hidden_size", "d_model"])
  6471. self.n_group = self.find_hparam(["n_groups", "num_groups"])
  6472. self.d_inner = self.find_hparam(["expand", "num_heads"]) * self.d_model
  6473. def get_attn_layers(self):
  6474. # Explicit list of layer type names
  6475. if layer_types := self.hparams.get("layer_types"):
  6476. return [
  6477. i for i, typ in enumerate(layer_types)
  6478. if typ == "attention"
  6479. ]
  6480. # Layer types indicated by index or period
  6481. attn_layers = self.hparams.get("attn_layer_indices", [])
  6482. if not attn_layers:
  6483. attn_period = self.hparams.get("attn_layer_period")
  6484. assert attn_period, "Didn't find attn_layer_indices or attn_layer_period"
  6485. attn_offset = self.hparams.get("attn_layer_offset")
  6486. assert attn_offset is not None, "No attention layer offset set with attn_layer_period"
  6487. attn_layers = [
  6488. i for i in range(self.block_count)
  6489. if i % attn_period == attn_offset
  6490. ]
  6491. return attn_layers
  6492. def find_hparam(self, keys: Iterable[str], *args, **kwargs) -> Any:
  6493. prefixed = []
  6494. for pfx in self.hparam_prefixes:
  6495. prefixed.extend(
  6496. "_".join([pfx, k])
  6497. for k in keys
  6498. )
  6499. keys = list(keys) + prefixed
  6500. return Mamba2Model.find_hparam(self, keys, *args, **kwargs)
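
    # Usage sketch (added for clarity, not in the original): with
    # hparam_prefixes = ["mamba"], find_hparam(["d_conv"]) searches the config
    # for "d_conv" and then "mamba_d_conv", so hybrid configs that namespace
    # their SSM params still resolve through the same lookup as plain Mamba2.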

    def modify_tensors(
        self, data_torch: Tensor, name: str, bid: int | None
    ) -> Iterable[tuple[str, Tensor]]:
        if (
            name.endswith("block_sparse_moe.input_linear.weight")
            or "shared_mlp" in name
        ):
            return GraniteMoeModel.modify_tensors(self, data_torch, name, bid)

        # Determine whether this is a mamba layer or an attention layer
        if bid in self._ssm_layers:
            return Mamba2Model.modify_tensors(self, data_torch, name, bid)
        elif bid in self._attn_layers:
            return GraniteMoeModel.modify_tensors(self, data_torch, name, bid)
        return [(self.map_tensor_name(name), data_torch)]

    def set_gguf_parameters(self):
        """This method merges params from both parents and some that are
        specific to this model. The result is some duplication of how the params
        get set. The following warnings are expected during conversion:

        WARNING:Duplicated key name 'granitehybrid.attention.head_count_kv'
        WARNING:Duplicated key name 'granitehybrid.context_length'
        """
        GraniteMoeModel.set_gguf_parameters(self)

        ## Mamba mixer params ##
        self.gguf_writer.add_ssm_conv_kernel(self.find_hparam(["conv_kernel", "d_conv"]))
        self.gguf_writer.add_ssm_state_size(self.find_hparam(["state_size", "d_state", "state_dim", "ssm_state_size"]))
        self.gguf_writer.add_ssm_group_count(self.n_group)
        self.gguf_writer.add_ssm_inner_size(self.d_inner)
        # NOTE: The mamba_dt_rank is _not_ the right field for how this is used
        # in llama.cpp
        self.gguf_writer.add_ssm_time_step_rank(self.find_hparam(["n_heads", "num_heads"]))

        ## Attention params ##
        head_count_kv = self.find_hparam(["num_key_value_heads", "n_head_kv"])
        head_count_kv_vec = [
            head_count_kv if i in self._attn_layers else 0 for i in range(self.block_count)
        ]
        if rope_dim := self.hparams.get("attn_rotary_emb"):
            self.gguf_writer.add_rope_dimension_count(rope_dim)
        self.gguf_writer.add_head_count_kv(head_count_kv_vec)

        ## If Bamba or non-hybrid, use rope, otherwise don't
        use_rope = (
            "BambaForCausalLM" in self.hparams["architectures"]
            or not self._ssm_layers
        )
        self.gguf_writer.add_rope_scaling_finetuned(use_rope)
        if not use_rope:
            self.gguf_writer.add_context_length(2**20)

        ## Validation ##
        d_head = self.find_hparam(["d_head"], optional=True) or 64
        assert self.hparams.get("hidden_act") in [None, "silu"], "Only SILU activation supported"
        assert self.d_inner % d_head == 0, f"SSM inner size {self.d_inner} not a multiple of head dim {d_head}"

    def set_vocab(self):
        self.hparams["pad_vocab_size_multiple"] = 8
        Mamba2Model.set_vocab(self)


@ModelBase.register("NemotronHForCausalLM")
class NemotronHModel(GraniteHybridModel):
    """Hybrid mamba2/attention model from NVIDIA"""
    model_arch = gguf.MODEL_ARCH.NEMOTRON_H

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Save the top-level head_dim for later
        self.head_dim = self.hparams.get("head_dim", self.hparams.get("attention_head_dim"))
        assert self.head_dim is not None, "Could not find the attention head dim in config"

        # Don't use expand to calculate d_inner
        self.d_inner = self.find_hparam(["num_heads"]) * self.d_model

        # Update the ssm / attn / mlp layers
        # M: Mamba2, *: Attention, -: MLP
        hybrid_override_pattern = self.hparams["hybrid_override_pattern"]
        self._ssm_layers = [i for i, val in enumerate(hybrid_override_pattern) if val == "M"]
        self._mlp_layers = [i for i, val in enumerate(hybrid_override_pattern) if val == "-"]

    def get_attn_layers(self):
        hybrid_override_pattern = self.hparams["hybrid_override_pattern"]
        assert len(hybrid_override_pattern) == self.block_count, "Mismatch between hybrid override and num_hidden_layers!"
        return [i for i, val in enumerate(hybrid_override_pattern) if val == "*"]
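
    # Example pattern decoding (hypothetical): with
    # hybrid_override_pattern == "M*-M", the layer indices resolve to
    #   _ssm_layers       == [0, 3]  # 'M' -> Mamba2
    #   get_attn_layers() == [1]     # '*' -> attention
    #   _mlp_layers       == [2]     # '-' -> MLP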

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_key_length(self.head_dim)
        self.gguf_writer.add_value_length(self.head_dim)

        # Set feed_forward_length
        # NOTE: This will trigger an override warning. This is preferable to
        #   duplicating all the parent logic
        n_ff = self.find_hparam(["intermediate_size", "n_inner", "hidden_dim"])
        self.gguf_writer.add_feed_forward_length([
            n_ff if i in self._mlp_layers else 0 for i in range(self.block_count)
        ])

    def set_vocab(self):
        super().set_vocab()

        # The tokenizer _does_ add a BOS token (via post_processor type
        # TemplateProcessing) but does not set add_bos_token to true in the
        # config, so we need to explicitly override it here.
        self.gguf_writer.add_add_bos_token(True)


@ModelBase.register("BailingMoeForCausalLM")
class BailingMoeModel(TextModel):
    model_arch = gguf.MODEL_ARCH.BAILINGMOE

    def set_vocab(self):
        self._set_vocab_gpt2()

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        hparams = self.hparams
        if (rope_dim := hparams.get("head_dim")) is None:
            rope_dim = hparams["hidden_size"] // hparams["num_attention_heads"]

        self.gguf_writer.add_rope_dimension_count(rope_dim)
        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
            self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"])
        else:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)
        self.gguf_writer.add_leading_dense_block_count(hparams["first_k_dense_replace"])
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])
        self.gguf_writer.add_expert_feed_forward_length(hparams["moe_intermediate_size"])
        self.gguf_writer.add_expert_weights_scale(1.0)
        self.gguf_writer.add_expert_count(hparams["num_experts"])
        self.gguf_writer.add_expert_shared_count(hparams["num_shared_experts"])
        self.gguf_writer.add_expert_weights_norm(hparams["norm_topk_prob"])

    _experts: list[dict[str, Tensor]] | None = None

    @staticmethod
    def permute(weights: Tensor, n_head: int, n_head_kv: int | None):
        if n_head_kv is not None and n_head != n_head_kv:
            n_head = n_head_kv
        return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
                .swapaxes(1, 2)
                .reshape(weights.shape))
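
    # Shape walk-through of permute() (hypothetical sizes): a weight of shape
    # [n_head * head_dim, n_embd] is viewed as
    # (n_head, 2, head_dim // 2, n_embd), the two middle axes are swapped and
    # the result flattened back, i.e. with head_dim == 4 the per-head row
    # order [0, 1, 2, 3] becomes [0, 2, 1, 3] (de-interleaving the two
    # rotary halves).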

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        n_head = self.hparams["num_attention_heads"]
        n_kv_head = self.hparams.get("num_key_value_heads")
        n_embd = self.hparams["hidden_size"]
        if (head_dim := self.hparams.get("head_dim")) is None:
            head_dim = n_embd // n_head

        output_name = self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT)

        if name.endswith("attention.dense.weight"):
            return [(self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_OUT, bid), data_torch)]
        elif name.endswith("query_key_value.weight"):
            q, k, v = data_torch.split([n_head * head_dim, n_kv_head * head_dim, n_kv_head * head_dim], dim=-2)

            return [
                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid), BailingMoeModel.permute(q, n_head, n_head)),
                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid), BailingMoeModel.permute(k, n_head, n_kv_head)),
                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid), v)
            ]
        elif name.find("mlp.experts") != -1:
            n_experts = self.hparams["num_experts"]
            assert bid is not None

            tensors: list[tuple[str, Tensor]] = []

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                # merge the experts into a single 3d tensor
                for w_name in ["down_proj", "gate_proj", "up_proj"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)
                    merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"
                    new_name = self.map_tensor_name(merged_name)
                    tensors.append((new_name, data_torch))

            return tensors

        new_name = self.map_tensor_name(name)

        if new_name == output_name and self.hparams.get("norm_head"):
            data_torch = data_torch.float()
            data_torch /= torch.norm(data_torch, p=2, dim=0, keepdim=True) + 1e-7

        return [(new_name, data_torch)]
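
    # Sketch of the expert merge above (hypothetical sizes): once all
    # n_experts * 3 per-expert weights of a layer are buffered, each group of
    # n_experts 2D tensors of shape [n_ff, n_embd] is stacked via
    # torch.stack(datas, dim=0) into one 3D tensor of shape
    # [n_experts, n_ff, n_embd] before being mapped to its GGUF name.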

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")


@ModelBase.register("BailingMoeV2ForCausalLM")
class BailingMoeV2Model(TextModel):
    model_arch = gguf.MODEL_ARCH.BAILINGMOE2

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if nextn_layers := self.hparams.get("num_nextn_predict_layers", 0):
            self.block_count = self.hparams["num_hidden_layers"] + nextn_layers
            self.tensor_map = gguf.get_tensor_name_map(self.model_arch, self.block_count)

    def set_vocab(self):
        self._set_vocab_gpt2()

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        hparams = self.hparams
        if (rope_dim := hparams.get("head_dim")) is None:
            rope_dim = hparams["hidden_size"] // hparams["num_attention_heads"]

        self.gguf_writer.add_rope_dimension_count(int(rope_dim * self.hparams.get("partial_rotary_factor", 0.5)))
        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
            self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"])
        else:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)
        self.gguf_writer.add_leading_dense_block_count(hparams["first_k_dense_replace"])
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])
        self.gguf_writer.add_expert_feed_forward_length(hparams["moe_intermediate_size"])
        self.gguf_writer.add_expert_shared_feed_forward_length(hparams.get("moe_shared_expert_intermediate_size", hparams["moe_intermediate_size"] * hparams["num_shared_experts"]))
        self.gguf_writer.add_expert_weights_scale(hparams["routed_scaling_factor"])
        self.gguf_writer.add_expert_count(hparams["num_experts"])
        self.gguf_writer.add_expert_shared_count(hparams["num_shared_experts"])
        self.gguf_writer.add_expert_group_count(hparams["n_group"])
        self.gguf_writer.add_expert_group_used_count(hparams["topk_group"])
        self.gguf_writer.add_expert_weights_norm(hparams["norm_topk_prob"])

        if hparams["score_function"] == "sigmoid":
            self.gguf_writer.add_expert_gating_func(gguf.ExpertGatingFuncType.SIGMOID)
        elif hparams["score_function"] == "softmax":
            self.gguf_writer.add_expert_gating_func(gguf.ExpertGatingFuncType.SOFTMAX)
        else:
            raise ValueError(f"Unsupported score_function value: {hparams['score_function']}")

        if (nextn_layers := self.hparams.get("num_nextn_predict_layers")) is not None:
            self.gguf_writer.add_nextn_predict_layers(nextn_layers)

    _experts: list[dict[str, Tensor]] | None = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        if "mlp.experts" in name:
            n_experts = self.hparams["num_experts"]
            assert bid is not None

            tensors: list[tuple[str, Tensor]] = []

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                # merge the experts into a single 3d tensor
                for w_name in ["down_proj", "gate_proj", "up_proj"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)
                    merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"
                    new_name = self.map_tensor_name(merged_name)
                    tensors.append((new_name, data_torch))

            return tensors

        if name.endswith(".expert_bias"):
            name = name.replace(".expert_bias", ".expert_bias.bias")

        return [(self.map_tensor_name(name), data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")


@ModelBase.register("GroveMoeForCausalLM", "modeling_grove_moe.GroveMoeForCausalLM")
class GroveMoeModel(TextModel):
    model_arch = gguf.MODEL_ARCH.GROVEMOE

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        if (n_experts := self.hparams.get("num_experts")) is not None:
            self.gguf_writer.add_expert_count(n_experts)
        if (moe_intermediate_size := self.hparams.get("moe_intermediate_size")) is not None:
            self.gguf_writer.add_expert_feed_forward_length(moe_intermediate_size)
            logger.info(f"gguf: expert feed forward length = {moe_intermediate_size}")
        # FIXME?: Hardcoded https://huggingface.co/inclusionAI/GroveMoE-Inst/blob/c4c69e5970d18907b5e6ddccdfd55176fe292df1/modeling_grove_moe.py#L299
        self.gguf_writer.add_expert_chunk_feed_forward_length(self.hparams.get("head_dim") or 128)
        # FIXME?: Hardcoded https://huggingface.co/inclusionAI/GroveMoE-Inst/blob/c4c69e5970d18907b5e6ddccdfd55176fe292df1/modeling_grove_moe.py#L298
        self.gguf_writer.add_experts_per_group(2)
        # FIXME?: Hardcoded https://huggingface.co/inclusionAI/GroveMoE-Inst/blob/c4c69e5970d18907b5e6ddccdfd55176fe292df1/modeling_grove_moe.py#L376
        self.gguf_writer.add_expert_group_scale(0.05)
        # YaRN is not enabled by default
        # To enable it, please refer to this guide: https://huggingface.co/Qwen/Qwen3-30B-A3B#processing-long-texts
        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
            self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"])

    _experts: list[dict[str, Tensor]] | None = None
    _chunk_experts: list[dict[str, Tensor]] | None = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        if name.endswith(".expert_bias"):
            # FIXME?: Unused https://huggingface.co/inclusionAI/GroveMoE-Inst/blob/c4c69e5970d18907b5e6ddccdfd55176fe292df1/modeling_grove_moe.py#L303
            return []

        # process the experts separately
        if name.find("chunk_experts") != -1:
            n_experts = self.hparams["num_experts"] // 2  # see add_experts_per_group
            assert bid is not None
            if self._chunk_experts is None:
                self._chunk_experts = [{} for _ in range(self.block_count)]

            self._chunk_experts[bid][name] = data_torch

            if len(self._chunk_experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for w_name in ["down_proj", "gate_proj", "up_proj"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.mlp.chunk_experts.{xid}.{w_name}.weight"
                        datas.append(self._chunk_experts[bid][ename])
                        del self._chunk_experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)
                    merged_name = f"model.layers.{bid}.mlp.chunk_experts.{w_name}.weight"
                    new_name = self.map_tensor_name(merged_name)
                    tensors.append((new_name, data_torch))
                return tensors
            else:
                return []
        elif name.find("experts") != -1:
            n_experts = self.hparams["num_experts"]
            assert bid is not None
            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for w_name in ["down_proj", "gate_proj", "up_proj"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)
                    merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"
                    new_name = self.map_tensor_name(merged_name)
                    tensors.append((new_name, data_torch))
                return tensors
            else:
                return []
        return [(self.map_tensor_name(name), data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._chunk_experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            chunk_experts = [k for d in self._chunk_experts for k in d.keys()]
            if len(chunk_experts) > 0:
                raise ValueError(f"Unprocessed adjugate experts: {chunk_experts}")
        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")


@ModelBase.register("ChameleonForConditionalGeneration")
@ModelBase.register("ChameleonForCausalLM")  # obsolete
class ChameleonModel(TextModel):
    model_arch = gguf.MODEL_ARCH.CHAMELEON

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_swin_norm(self.hparams.get("swin_norm", False))

    def set_vocab(self):
        self._set_vocab_gpt2()

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # ignore image tokenizer for now
        # TODO: remove this once image support is implemented for Chameleon
        if name.startswith("model.vqmodel"):
            return []

        n_head = self.hparams["num_attention_heads"]
        n_kv_head = self.hparams.get("num_key_value_heads")
        hidden_dim = self.hparams.get("hidden_size")

        if name.endswith(("q_proj.weight", "q_proj.bias")):
            data_torch = LlamaModel.permute(data_torch, n_head, n_head)
        if name.endswith(("k_proj.weight", "k_proj.bias")):
            data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)
        if name.endswith(("q_norm.weight", "q_norm.bias")):
            data_torch = ChameleonModel._reverse_hf_permute(data_torch, n_head, hidden_dim)
        if name.endswith(("k_norm.weight", "k_norm.bias")):
            data_torch = ChameleonModel._reverse_hf_permute(data_torch, n_kv_head, hidden_dim)

        return [(self.map_tensor_name(name), data_torch)]

    # see: https://github.com/huggingface/transformers/blob/72fb02c47dbbe1999ae105319f24631cad6e2e00/src/transformers/models/chameleon/convert_chameleon_weights_to_hf.py#L176-L203
    @staticmethod
    def _reverse_hf_permute(data_torch, n_heads, hidden_dim):
        head_dim = hidden_dim // n_heads
        data_torch = data_torch[0].view(2, head_dim // 2).t().reshape(1, -1)
        data_torch = data_torch.repeat_interleave(n_heads, 0)
        return data_torch
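
    # Worked example of _reverse_hf_permute (hypothetical values): with
    # hidden_dim == 4 and n_heads == 1 (head_dim == 4), a norm weight row
    # [a, b, c, d] viewed as (2, 2) and transposed becomes [a, c, b, d],
    # then repeat_interleave copies that row once per head.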


@ModelBase.register("UltravoxModel")
class UltravoxModel(TextModel):
    model_arch = gguf.MODEL_ARCH.LLAMA  # dummy

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        raise NotImplementedError("Ultravox does not have a text decoder. Instead, it uses Llama or other models for text. If you want to get the audio encoder, please use the --mmproj argument")


@ModelBase.register("Qwen2AudioForConditionalGeneration")
class WhisperEncoderModel(MmprojModel):
    has_vision_encoder = False  # no vision encoder
    has_audio_encoder = True

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if "hidden_size" not in self.hparams and "intermediate_size" not in self.hparams:
            self.hparams["hidden_size"] = self.hparams["d_model"]
            self.hparams["intermediate_size"] = self.hparams["encoder_ffn_dim"]
            self.hparams["num_attention_heads"] = self.hparams["encoder_attention_heads"]

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.QWEN2A)
        self.gguf_writer.add_audio_num_mel_bins(self.hparams["num_mel_bins"])
        self.gguf_writer.add_audio_attention_layernorm_eps(self.hparams.get("layer_norm_eps", 1e-5))

    def tensor_force_quant(self, name, new_name, bid, n_dims):
        if ".conv" in name and ".weight" in name:
            return gguf.GGMLQuantizationType.F16
        return super().tensor_force_quant(name, new_name, bid, n_dims)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        if name.startswith("language_model."):
            # skip language model tensors
            return []

        # prevent a naming clash with vision tensors
        if name.startswith("multi_modal_projector"):
            name = "audio." + name

        if "conv1.bias" in name or "conv2.bias" in name:
            # transpose conv1 and conv2 bias
            data_torch = data_torch.unsqueeze(-1)

        return [(self.map_tensor_name(name), data_torch)]


@ModelBase.register("UltravoxModel")
class UltravoxWhisperEncoderModel(WhisperEncoderModel):
    has_vision_encoder = False  # no vision encoder
    has_audio_encoder = True

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.ULTRAVOX)
        self.gguf_writer.add_audio_stack_factor(self.global_config["stack_factor"])


@ModelBase.register("VoxtralForConditionalGeneration")
class VoxtralWhisperEncoderModel(WhisperEncoderModel):
    has_vision_encoder = False  # no vision encoder
    has_audio_encoder = True

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.VOXTRAL)
        self.gguf_writer.add_audio_stack_factor(4)  # == intermediate_size // hidden_size


@ModelBase.register("FalconH1ForCausalLM")
class FalconH1Model(Mamba2Model):
    model_arch = gguf.MODEL_ARCH.FALCON_H1

    def __init__(self, *args, **kwargs):
        # Set the hparam prefixes for Falcon Mamba2
        self.hparam_prefixes = ["mamba"]

        # Initialize the base Mamba2Model
        super().__init__(*args, **kwargs)

        # Use Llama conversion for attention
        self._transformer_model_class = LlamaModel

        # n_group and d_inner are used during reshape_tensors for mamba2
        self.n_group = self.find_hparam(["n_groups"])
        self.d_inner = self.find_hparam(["mamba_d_ssm"])
        self.d_head = self.find_hparam(["d_head"])

        # Initialize any Falcon Mamba2 specific attributes
        self.has_attention = True  # Falcon Mamba2 has attention components

        # Load Falcon-H1 multipliers from hyperparameters
        self.attention_in_multiplier = self.find_hparam(["attention_in_multiplier"], optional=True)
        self.attention_out_multiplier = self.find_hparam(["attention_out_multiplier"], optional=True)
        self.ssm_in_multiplier = self.find_hparam(["ssm_in_multiplier"], optional=True)
        self.ssm_out_multiplier = self.find_hparam(["ssm_out_multiplier"], optional=True)
        self.mlp_multipliers = self.find_hparam(["mlp_multipliers"], optional=True)
        self.ssm_multipliers = self.find_hparam(["ssm_multipliers"], optional=True)
        self.intermediate_size = self.find_hparam(["intermediate_size"])
        self.key_multiplier = self.find_hparam(["key_multiplier"], optional=True)

    def find_hparam(self, keys: Iterable[str], *args, **kwargs) -> Any:
        prefixed = []
        for pfx in self.hparam_prefixes:
            prefixed.extend(
                "_".join([pfx, k])
                for k in keys
            )
        keys = list(keys) + prefixed
        return super().find_hparam(keys, *args, **kwargs)

    def set_vocab(self):
        self._set_vocab_gpt2()

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        tensors = list(super().modify_tensors(data_torch, name, bid))
        tensor = tensors[0][1]

        if "down_proj" in name:
            tensor = tensor * self.mlp_multipliers[1]
        elif "gate_proj" in name:
            tensor = tensor * self.mlp_multipliers[0]
        elif "k_proj" in name:
            tensor = tensor * self.key_multiplier * self.attention_in_multiplier
        elif "q_proj" in name:
            tensor = tensor * self.attention_in_multiplier
        elif "v_proj" in name:
            tensor = tensor * self.attention_in_multiplier
        elif "o_proj" in name:
            tensor = tensor * self.attention_out_multiplier
        elif "out_proj" in name:
            tensor = tensor * self.ssm_out_multiplier
        elif "in_proj" in name:
            tensor = tensor * self.ssm_in_multiplier
            zxbcdt_multipliers = self.hparams["ssm_multipliers"]
            intermediate_size = self.hparams["mamba_d_ssm"]
            groups_time_state_size = self.hparams["mamba_n_groups"] * self.hparams["mamba_d_state"]
            tensor[:intermediate_size, :] *= zxbcdt_multipliers[0]
            tensor[intermediate_size:2 * intermediate_size, :] *= zxbcdt_multipliers[1]
            tensor[2 * intermediate_size:2 * intermediate_size + groups_time_state_size, :] *= zxbcdt_multipliers[2]
            tensor[2 * intermediate_size + groups_time_state_size:2 * intermediate_size + 2 * groups_time_state_size, :] *= zxbcdt_multipliers[3]
            tensor[2 * intermediate_size + 2 * groups_time_state_size:, :] *= zxbcdt_multipliers[4]
        elif "lm_head" in name:
            tensor = tensor * self.hparams["lm_head_multiplier"]
        elif "embed_tokens" in name:
            tensor = tensor * self.hparams["embedding_multiplier"]
        elif "mamba.norm" in name:
            tensor = tensor.reshape(self.n_group, self.d_inner // self.n_group)

        tensors = [(tensors[0][0], tensor)]
        return tensors
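
    # Layout assumed by the in_proj scaling above: the fused zxbcdt weight is
    # split row-wise into [z | x | B | C | dt] sections, i.e. two blocks of
    # mamba_d_ssm rows, two blocks of mamba_n_groups * mamba_d_state rows,
    # and the remaining dt rows, each scaled by its own ssm_multipliers entry.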

    def set_gguf_parameters(self):
        super().set_gguf_parameters()

        ## General Params ##
        self.gguf_writer.add_vocab_size(self.hparams["vocab_size"])
        # Override some Mamba2 defaults
        self.gguf_writer.add_block_count(self.block_count)
        self.gguf_writer.add_context_length(self.hparams.get("max_position_embeddings", 0))
        self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])

        ## Attention params ##
        self.gguf_writer.add_head_count(self.hparams["num_attention_heads"])  # Override value 0 from Mamba2
        self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"])
        self.gguf_writer.add_key_length(self.hparams["head_dim"])
        self.gguf_writer.add_value_length(self.hparams["head_dim"])

        ## Validation ##
        assert self.hparams.get("hidden_act") in [None, "silu"], "Only SILU activation supported"
        assert self.d_inner % self.d_head == 0, f"SSM inner size {self.d_inner} not a multiple of head dim {self.d_head}"

        # Add any other Falcon Mamba2 specific configuration
        self.gguf_writer.add_rope_freq_base(self.find_hparam(["rope_theta"]))


@ModelBase.register("HunYuanMoEV1ForCausalLM")
class HunYuanMoEModel(TextModel):
    model_arch = gguf.MODEL_ARCH.HUNYUAN_MOE

    def set_vocab(self):
        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(self.dir_model, trust_remote_code=True)

        # 1. Get the pre-tokenizer identifier hash
        tokpre = self.get_vocab_base_pre(tokenizer)

        # 2. Reverse-engineer the merges list from mergeable_ranks
        merges = []
        vocab = {}
        mergeable_ranks = tokenizer.mergeable_ranks
        for token, rank in mergeable_ranks.items():
            vocab[QwenModel.token_bytes_to_string(token)] = rank
            if len(token) == 1:
                continue
            merged = QwenModel.bpe(mergeable_ranks, token, max_rank=rank)
            if len(merged) == 2:  # TODO: this is an assert in Qwen, why?
                merges.append(' '.join(map(QwenModel.token_bytes_to_string, merged)))

        # 3. Generate the tokens and toktypes lists
        vocab_size = self.hparams["vocab_size"]
        assert tokenizer.vocab_size == vocab_size
        special_tokens = tokenizer.special_tokens
        reverse_vocab = {id_ : encoded_tok for encoded_tok, id_ in {**vocab, **special_tokens}.items()}
        tokens: list[str] = []
        toktypes: list[int] = []
        for i in range(vocab_size):
            if i not in reverse_vocab:
                tokens.append(f"[PAD{i}]")
                toktypes.append(gguf.TokenType.UNUSED)
            else:
                token = reverse_vocab[i]
                tokens.append(token)
                if i in special_tokens.values():
                    toktypes.append(gguf.TokenType.CONTROL)
                else:
                    toktypes.append(gguf.TokenType.NORMAL)

        # 4. Write all vocab-related fields to the GGUF writer
        self.gguf_writer.add_tokenizer_model("gpt2")
        self.gguf_writer.add_tokenizer_pre(tokpre)
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)
        self.gguf_writer.add_token_merges(merges)

        # 5. Add special tokens and chat templates
        special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=False)
        special_vocab.add_to_gguf(self.gguf_writer)
        # FIX for BOS token: Overwrite incorrect id read from config.json
        self.gguf_writer.add_bos_token_id(127959)  # <|bos|>

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        hparams = self.hparams

        self.gguf_writer.add_expert_count(hparams["num_experts"])
        self.gguf_writer.add_expert_shared_feed_forward_length(hparams["intermediate_size"])

        moe_intermediate_size = hparams["moe_intermediate_size"]
        assert all(n == moe_intermediate_size[0] for n in moe_intermediate_size)
        self.gguf_writer.add_expert_feed_forward_length(moe_intermediate_size[0])

        moe_topk = hparams["moe_topk"]
        assert all(topk == moe_topk[0] for topk in moe_topk)
        self.gguf_writer.add_expert_used_count(moe_topk[0])

        moe_shared_expert = hparams["num_shared_expert"]
        assert all(n == moe_shared_expert[0] for n in moe_shared_expert)
        self.gguf_writer.add_expert_shared_count(moe_shared_expert[0])

        # Rope
        rope_scaling = hparams.get("rope_scaling", {})
        if rope_scaling.get("type") == "dynamic":
            # HunYuan uses NTK Aware Alpha based scaling. Original implementation: https://www.reddit.com/r/LocalLLaMA/comments/14lz7j5/ntkaware_scaled_rope_allows_llama_models_to_have/
            # 1000 corresponds to a usable context length of 256k (https://github.com/Tencent-Hunyuan/Hunyuan-A13B/blob/main/report/Hunyuan_A13B_Technical_Report.pdf)
            alpha = rope_scaling.get("alpha", 1000)
            base = hparams.get("rope_theta", 10000.0)
            dim = (hparams["hidden_size"] // hparams["num_attention_heads"])  # 128
            scaled_base = base * (alpha ** (dim / (dim - 2)))  # 10000 * (1000 ** (128 / 126)) = 11158839.9251
            self.gguf_writer.add_rope_freq_base(scaled_base)
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)
            self.gguf_writer.add_rope_scaling_factor(1)
            # There is no consistent way to calculate ctx from alpha, and the config is incorrectly set to 32k
            self.gguf_writer.add_rope_scaling_orig_ctx_len(256 * 1024)  # 256k context length
            self.gguf_writer.add_context_length(256 * 1024)  # 256k context length

            # if any of our assumptions about the values are wrong, something has changed and this may need to be updated
            assert alpha == 1000 and base == 10000.0 and dim == 128 and self.hparams["max_position_embeddings"] in [32 * 1024, 256 * 1024], \
                "HunYuan dynamic RoPE scaling assumptions changed, please update the logic or context length manually"

    _experts: list[dict[str, Tensor]] | None = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        if name == "lm_head.weight":
            if self.hparams.get("tie_word_embeddings", False):
                logger.info("Skipping tied output layer 'lm_head.weight'")
                return []

        if name.find("mlp.experts") != -1:
            n_experts = self.hparams["num_experts"]
            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                # merge the experts into a single 3d tensor
                tensors: list[tuple[str, Tensor]] = []
                for w_name in ["down_proj", "gate_proj", "up_proj"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)
                    merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"
                    new_name = self.map_tensor_name(merged_name)
                    tensors.append((new_name, data_torch))
                return tensors
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")


@ModelBase.register("LLaDAMoEModel", "LLaDAMoEModelLM")
class LLaDAMoEModel(TextModel):
    model_arch = gguf.MODEL_ARCH.LLADA_MOE

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        if (n_experts := self.hparams.get("num_experts")) is not None:
            self.gguf_writer.add_expert_count(n_experts)

        if (expert_intermediate_size := self.hparams.get("expert_intermediate_size")) is not None:
            self.gguf_writer.add_expert_feed_forward_length(expert_intermediate_size)

        # number of experts used per token (top-k)
        if (n_experts_used := self.hparams.get("num_experts_per_tok")) is not None:
            self.gguf_writer.add_expert_used_count(n_experts_used)

        self.gguf_writer.add_mask_token_id(156895)
        self.gguf_writer.add_causal_attention(False)
        self.gguf_writer.add_diffusion_shift_logits(False)

    _experts: list[dict[str, Tensor]] | None = None

    # Copied from: Qwen2MoeModel
    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # process the experts separately
        if name.find("experts") != -1:
            n_experts = self.hparams["num_experts"]
            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for w_name in ["down_proj", "gate_proj", "up_proj"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)
                    merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"
                    new_name = self.map_tensor_name(merged_name)
                    tensors.append((new_name, data_torch))
                return tensors
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]

    # Copied from: Qwen2MoeModel
    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")


@ModelBase.register("HunYuanDenseV1ForCausalLM")
class HunYuanModel(TextModel):
    model_arch = gguf.MODEL_ARCH.HUNYUAN_DENSE

    def set_vocab(self):
        if (self.dir_model / "tokenizer.json").is_file():
            self._set_vocab_gpt2()
        else:
            from transformers import AutoTokenizer
            tokenizer = AutoTokenizer.from_pretrained(self.dir_model, trust_remote_code=True)

            # 1. Get the pre-tokenizer identifier hash
            tokpre = self.get_vocab_base_pre(tokenizer)

            # 2. Reverse-engineer the merges list from mergeable_ranks
            merges = []
            vocab = {}
            mergeable_ranks = tokenizer.mergeable_ranks
            for token, rank in mergeable_ranks.items():
                vocab[QwenModel.token_bytes_to_string(token)] = rank
                if len(token) == 1:
                    continue
                merged = QwenModel.bpe(mergeable_ranks, token, max_rank=rank)
                if len(merged) == 2:
                    merges.append(' '.join(map(QwenModel.token_bytes_to_string, merged)))

            # 3. Generate the tokens and toktypes lists
            vocab_size = self.hparams["vocab_size"]
            assert tokenizer.vocab_size == vocab_size
            special_tokens = tokenizer.special_tokens
            reverse_vocab = {id_ : encoded_tok for encoded_tok, id_ in {**vocab, **special_tokens}.items()}
            tokens: list[str] = []
            toktypes: list[int] = []
            for i in range(vocab_size):
                if i not in reverse_vocab:
                    tokens.append(f"[PAD{i}]")
                    toktypes.append(gguf.TokenType.UNUSED)
                else:
                    token = reverse_vocab[i]
                    tokens.append(token)
                    if i in special_tokens.values():
                        toktypes.append(gguf.TokenType.CONTROL)
                    else:
                        toktypes.append(gguf.TokenType.NORMAL)

            # 4. Write all vocab-related fields to the GGUF writer
            self.gguf_writer.add_tokenizer_model("gpt2")
            self.gguf_writer.add_tokenizer_pre(tokpre)
            self.gguf_writer.add_token_list(tokens)
            self.gguf_writer.add_token_types(toktypes)
            self.gguf_writer.add_token_merges(merges)

            # 5. Add special tokens and chat templates
            special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=False)
            special_vocab.add_to_gguf(self.gguf_writer)
            # FIX for BOS token: Overwrite incorrect id read from config.json
            if self.hparams['hidden_size'] == 4096:
                self.gguf_writer.add_bos_token_id(127958)  # only for the 7B dense model, fix the <|bos|> token

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        hparams = self.hparams

        # Rope
        rope_scaling = hparams.get("rope_scaling", {})
        if rope_scaling.get("type") == "dynamic":
            # HunYuan uses NTK Aware Alpha based scaling. Original implementation: https://www.reddit.com/r/LocalLLaMA/comments/14lz7j5/ntkaware_scaled_rope_allows_llama_models_to_have/
            # 1000 corresponds to a usable context length of 256k (https://github.com/Tencent-Hunyuan/Hunyuan-A13B/blob/main/report/Hunyuan_A13B_Technical_Report.pdf)
            alpha = rope_scaling.get("alpha", 50)
            base = hparams.get("rope_theta", 10000.0)
            dim = hparams["head_dim"]
            scaled_base = base * (alpha ** (dim / (dim - 2)))
            self.gguf_writer.add_rope_freq_base(scaled_base)
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)
            self.gguf_writer.add_rope_scaling_factor(1)
            # There is no consistent way to calculate ctx from alpha, and the config is incorrectly set to 32k
            self.gguf_writer.add_rope_scaling_orig_ctx_len(256 * 1024)  # 256k context length
            self.gguf_writer.add_context_length(256 * 1024)  # 256k context length

            # if any of our assumptions about the values are wrong, something has changed and this may need to be updated
            assert base == 10000.0 and self.hparams["max_position_embeddings"] in [32 * 1024, 256 * 1024], \
                "HunYuan dynamic RoPE scaling assumptions changed, please update the logic or context length manually"

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        if name == "lm_head.weight":
            if self.hparams.get("tie_word_embeddings", False):
                logger.info("Skipping tied output layer 'lm_head.weight'")
                return []

        return [(self.map_tensor_name(name), data_torch)]


@ModelBase.register("SmolLM3ForCausalLM")
class SmolLM3Model(LlamaModel):
    model_arch = gguf.MODEL_ARCH.SMOLLM3

    def set_vocab(self):
        super().set_vocab()
        # remove unsupported array slicing in chat template
        # ref: https://huggingface.co/ggml-org/SmolLM3-3B-GGUF/discussions/1
        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(self.dir_model)
        if tokenizer.chat_template is not None:
            chat_template = tokenizer.chat_template.replace("[:]", "")
            self.gguf_writer.add_chat_template(chat_template)


@ModelBase.register("GptOssForCausalLM")
class GptOssModel(TextModel):
    model_arch = gguf.MODEL_ARCH.GPT_OSS

    # TODO: remove once MXFP4 is supported more generally
    def dequant_model(self):
        quant_config = self.hparams.get("quantization_config")
        if quant_config is not None and quant_config.get("quant_method") == "mxfp4":
            return
        return super().dequant_model()

    def transform_nibble_layout(self, tensor):
        assert tensor.dtype == torch.uint8
        assert tensor.shape[-1] == 16
        # swap nibbles
        t_lo = tensor & 0x0F
        t_hi = tensor & 0xF0
        t_swapped = (t_lo << 4) | (t_hi >> 4)
        tensor = t_swapped
        # transform aaaa...bbbb... to abababab...
        blk_a, blk_b = tensor.chunk(2, dim=-1)
        # get a_
        blk_a0 = (blk_a & 0xF0).view(-1, 1)
        blk_a1 = (blk_a << 4).view(-1, 1)
        blk_a = torch.stack((blk_a0, blk_a1), dim=2).view(tensor.shape)
        # get _b
        blk_b0 = (blk_b >> 4).view(-1, 1)
        blk_b1 = (blk_b & 0x0F).view(-1, 1)
        blk_b = torch.stack((blk_b0, blk_b1), dim=2).view(tensor.shape)
        # swap once more
        out = blk_a | blk_b
        out_h = out & 0xF0
        out_l = out & 0x0F
        out = (out_h >> 4) | (out_l << 4)
        return out
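
    # Byte-level example of the transform above (hypothetical input): the
    # first nibble swap turns 0xAB into 0xBA; the chunk/stack step then
    # interleaves the two 8-byte halves aaaa...bbbb... into abab..., and the
    # final swap restores each byte's nibble order.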

    def repack_mxfp4(self, new_name: str, blocks: Tensor, scales: Tensor):
        assert blocks.dtype == torch.uint8
        assert scales.dtype == torch.uint8
        scales = scales.unsqueeze(-1)
        assert len(blocks.shape) == 4
        assert len(scales.shape) == 4
        blocks = self.transform_nibble_layout(blocks)
        new_data = torch.concat((scales, blocks), dim=-1)
        new_shape = [new_data.shape[0], new_data.shape[1], new_data.shape[2] * 32]
        logger.info(f"Repacked {new_name} with shape {new_shape} and quantization MXFP4")
        # flatten last dim
        new_data = new_data.view(new_data.shape[0], new_data.shape[1], new_data.shape[2] * new_data.shape[3])
        new_data = new_data.numpy()
        self.gguf_writer.add_tensor(new_name, new_data, raw_dtype=gguf.GGMLQuantizationType.MXFP4)
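
    # Resulting GGUF MXFP4 block layout (as concatenated above): one scale
    # byte followed by 16 packed data bytes, i.e. 17 bytes encoding 32 4-bit
    # values per block, which is why the logical last dimension reported in
    # new_shape is new_data.shape[2] * 32 elements.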

    def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
        blocks0: Tensor = torch.zeros(1)
        blocks1: Tensor = torch.zeros(1)
        # we assume that tensors are loaded in the correct order
        for name, data_torch in self.get_tensors():
            if "mlp.experts.down_proj_blocks" in name:
                blocks0 = data_torch
            elif "mlp.experts.down_proj_scales" in name:
                new_name = self.map_tensor_name(name.replace("_scales", ".weight"))
                self.repack_mxfp4(new_name, blocks0, data_torch)
            elif "mlp.experts.gate_up_proj_blocks" in name:
                blocks0, blocks1 = data_torch[:, ::2, :, :], data_torch[:, 1::2, :, :]
            elif "mlp.experts.gate_up_proj_scales" in name:
                scales0, scales1 = data_torch[:, ::2, :], data_torch[:, 1::2, :]
                new_name_gate = self.map_tensor_name(name.replace("gate_up_proj_scales", "gate_proj.weight"))
                new_name_up = self.map_tensor_name(name.replace("gate_up_proj_scales", "up_proj.weight"))
                self.repack_mxfp4(new_name_gate, blocks0, scales0)
                self.repack_mxfp4(new_name_up, blocks1, scales1)
        return []
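
    # Note on the gate_up split above: the HF checkpoint stores gate and up
    # projections interleaved along dim 1 (even indices -> gate, odd -> up),
    # so the [:, ::2] / [:, 1::2] strided views recover the two separate
    # expert weight blocks before repacking.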

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused
        if "sinks" in name:
            name += ".weight"
        # correct naming for down_proj
        if "down_proj" in name:
            if name.endswith("_bias"):
                name = name.replace("down_proj_bias", "down_proj.bias")
            elif "_blocks" not in name and "_scales" not in name:
                logger.warning(f"{name} is not in MXFP4, performance may be degraded")
                name = name.replace("down_proj", "down_proj.weight")
                data_torch = data_torch.transpose(-1, -2)
            else:
                # otherwise, it should already be repacked to ggml MXFP4 format
                return []

        # split the gate_up into gate and up
        if "gate_up_proj" in name:
            if name.endswith("_bias"):
                name_up = name.replace("gate_up_proj_bias", "up_proj.bias")
                name_gate = name.replace("gate_up_proj_bias", "gate_proj.bias")
                gate_proj_bias, up_proj_bias = data_torch[..., ::2], data_torch[..., 1::2]
                return [
                    (self.map_tensor_name(name_gate), gate_proj_bias),
                    (self.map_tensor_name(name_up), up_proj_bias)
                ]
            elif "_blocks" not in name and "_scales" not in name:
                logger.warning(f"{name} is not in MXFP4, performance may be degraded")
                name_up = name.replace("gate_up_proj", "up_proj.weight")
                name_gate = name.replace("gate_up_proj", "gate_proj.weight")
                data_torch = data_torch.transpose(-1, -2)
                gate_proj_weight, up_proj_weight = data_torch[:, ::2, :], data_torch[:, 1::2, :]
                return [
                    (self.map_tensor_name(name_gate), gate_proj_weight),
                    (self.map_tensor_name(name_up), up_proj_weight)
                ]
            else:
                # otherwise, it should already be repacked to ggml MXFP4 format
                return []

        return [(self.map_tensor_name(name), data_torch)]

    def set_vocab(self):
        self._set_vocab_gpt2()

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_sliding_window(self.hparams["sliding_window"])
        self.gguf_writer.add_expert_feed_forward_length(self.hparams["intermediate_size"])

        rope_scaling = self.hparams.get("rope_scaling") or {}
        rope_type = rope_scaling.get("rope_type", rope_scaling.get("type"))
        assert rope_type == "yarn", f"GPT-OSS only supports yarn rope scaling, got {rope_type}"
        self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
        self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
        self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling.get("original_max_position_embeddings", 4096))


@ModelBase.register("Lfm2ForCausalLM", "LFM2ForCausalLM")
class LFM2Model(TextModel):
    model_arch = gguf.MODEL_ARCH.LFM2

    def _add_feed_forward_length(self):
        ff_dim = self.hparams["block_ff_dim"]
        auto_adjust_ff_dim = self.hparams["block_auto_adjust_ff_dim"]
        ffn_dim_multiplier = self.hparams["block_ffn_dim_multiplier"]
        multiple_of = self.hparams["block_multiple_of"]

        if auto_adjust_ff_dim:
            ff_dim = int(2 * ff_dim / 3)
            # custom dim factor multiplier
            if ffn_dim_multiplier is not None:
                ff_dim = int(ffn_dim_multiplier * ff_dim)
            ff_dim = multiple_of * ((ff_dim + multiple_of - 1) // multiple_of)

        self.gguf_writer.add_feed_forward_length(ff_dim)
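
    # Worked example of the auto-adjustment above (hypothetical values): with
    # block_ff_dim == 12288, no multiplier and block_multiple_of == 256:
    #   ff_dim = int(2 * 12288 / 3)            # -> 8192
    #   ff_dim = 256 * ((8192 + 255) // 256)   # -> 8192 (already a multiple)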

    def set_gguf_parameters(self):
        # set num_key_value_heads only for attention layers
        self.hparams["num_key_value_heads"] = [
            self.hparams["num_key_value_heads"] if layer_type == "full_attention" else 0
            for layer_type in self.hparams["layer_types"]
        ]

        super().set_gguf_parameters()
        self.gguf_writer.add_vocab_size(self.hparams["vocab_size"])
        self.gguf_writer.add_shortconv_l_cache(self.hparams["conv_L_cache"])
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["norm_eps"])
        self._add_feed_forward_length()

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        is_vision_tensor = "vision_tower" in name or "multi_modal_projector" in name
        if is_vision_tensor:
            # skip vision tensors
            return []

        name = name.replace("language_model.", "")

        # conv op requires 2d tensor
        if 'conv.conv' in name:
            data_torch = data_torch.squeeze(1)

        return [(self.map_tensor_name(name), data_torch)]


@ModelBase.register("Lfm2MoeForCausalLM")
class LFM2MoeModel(TextModel):
    model_arch = gguf.MODEL_ARCH.LFM2MOE

    def set_gguf_parameters(self):
        # set num_key_value_heads only for attention layers
        self.hparams["num_key_value_heads"] = [
            self.hparams["num_key_value_heads"] if layer_type == "full_attention" else 0
            for layer_type in self.hparams["layer_types"]
        ]

        super().set_gguf_parameters()
        self.gguf_writer.add_expert_count(self.hparams["num_experts"])
        self.gguf_writer.add_expert_feed_forward_length(self.hparams["moe_intermediate_size"])
        self.gguf_writer.add_leading_dense_block_count(self.hparams["num_dense_layers"])
        self.gguf_writer.add_expert_gating_func(gguf.ExpertGatingFuncType.SIGMOID)
        self.gguf_writer.add_vocab_size(self.hparams["vocab_size"])
        self.gguf_writer.add_shortconv_l_cache(self.hparams["conv_L_cache"])

    # cache for experts weights for merging
    _experts_cache: dict[int, dict[str, Tensor]] = {}

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # conv op requires 2d tensor
        if 'conv.conv' in name:
            data_torch = data_torch.squeeze(1)

        if name.endswith(".expert_bias"):
            name = name.replace(".expert_bias", ".expert_bias.bias")

        # merge expert weights
        if 'experts' in name:
            n_experts = self.hparams["num_experts"]
            assert bid is not None

            expert_cache = self._experts_cache.setdefault(bid, {})
            expert_cache[name] = data_torch
            expert_weights = ["w1", "w2", "w3"]

            # not enough expert weights to merge
            if len(expert_cache) < n_experts * len(expert_weights):
                return []

            tensors: list[tuple[str, Tensor]] = []
            for w_name in expert_weights:
                datas: list[Tensor] = []

                for xid in range(n_experts):
                    ename = f"model.layers.{bid}.feed_forward.experts.{xid}.{w_name}.weight"
                    datas.append(expert_cache[ename])
                    del expert_cache[ename]

                data_torch = torch.stack(datas, dim=0)
                merged_name = f"layers.{bid}.feed_forward.experts.{w_name}.weight"
                new_name = self.map_tensor_name(merged_name)
                tensors.append((new_name, data_torch))
            del self._experts_cache[bid]
            return tensors

        return [(self.map_tensor_name(name), data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()
        assert not self._experts_cache


@ModelBase.register("Lfm2VlForConditionalGeneration")
class LFM2VLModel(MmprojModel):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        assert self.hparams_vision is not None
        # TODO(tarek): for dynamic resolution, image_size is not specified; set it here for compatibility
        self.hparams_vision["image_size"] = 256

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.LFM2)
        self.gguf_writer.add_vision_attention_layernorm_eps(self.find_vparam(["layer_norm_eps"]))
        self.gguf_writer.add_vision_projector_scale_factor(self.global_config.get("downsample_factor", 2))
        self.gguf_writer.add_vision_use_gelu(True)
        # Python slicing notation, e.g. for vision_feature_layer == -1 we pick the last layer -> vision_feature_layers_to_drop = 0
        vision_feature_layers_to_drop = -(self.global_config.get("vision_feature_layer", -1) + 1)
        self.gguf_writer.add_vision_block_count(self.find_vparam(self.n_block_keys) - vision_feature_layers_to_drop)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused
        is_vision_tensor = "vision_tower" in name or "multi_modal_projector" in name

        if is_vision_tensor:
            # remove "model." prefix
            name = name.replace("model.vision_tower.", "vision_tower.")
            name = name.replace("model.multi_modal_projector.", "multi_modal_projector.")

            if "patch_embedding.weight" in name:
                data_torch = data_torch.view(data_torch.shape[0], 16, 16, 3).permute(0, 3, 1, 2)

            return [(self.map_tensor_name(name), data_torch)]

        return []  # skip other tensors
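
    # Note on the patch_embedding reshape above: the flattened weight of
    # shape [n_embd, 16 * 16 * 3] is viewed as [n_embd, 16, 16, 3] (HWC) and
    # permuted to [n_embd, 3, 16, 16], the CHW layout of a conv2d kernel for
    # 16x16 RGB patches.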


@ModelBase.register("SmallThinkerForCausalLM")
class SmallThinkerModel(TextModel):
    model_arch = gguf.MODEL_ARCH.SMALLTHINKER

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        if (n_experts := self.hparams.get("num_experts", self.hparams.get("moe_num_primary_experts"))) is not None:
            self.gguf_writer.add_expert_count(n_experts)
        if (n_experts_used := self.hparams.get("num_experts_per_tok", self.hparams.get("moe_num_active_primary_experts"))) is not None:
            self.gguf_writer.add_expert_used_count(n_experts_used)
        if (moe_intermediate_size := self.hparams.get("moe_ffn_hidden_size")) is not None:
            self.gguf_writer.add_expert_feed_forward_length(moe_intermediate_size)
            self.gguf_writer.add_feed_forward_length(moe_intermediate_size)
            logger.info(f"gguf: expert feed forward length = {moe_intermediate_size}")
        if self.hparams.get('moe_primary_router_apply_softmax'):
            self.gguf_writer.add_expert_gating_func(gguf.ExpertGatingFuncType.SOFTMAX)
        else:
            self.gguf_writer.add_expert_gating_func(gguf.ExpertGatingFuncType.SIGMOID)
        # YaRN is not enabled by default
        # To enable it, please refer to this guide: https://huggingface.co/Qwen/Qwen3-30B-A3B#processing-long-texts
        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
            self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"])

        sliding_window_layout = self.hparams.get("sliding_window_layout")
        if sliding_window_layout:
            for i in sliding_window_layout:
                if i != 0:
                    sliding_window = self.hparams.get("sliding_window_size")
                    if sliding_window:
                        self.gguf_writer.add_sliding_window(sliding_window)
                    break

    _experts: list[dict[str, Tensor]] | None = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # process the experts separately
        if name.find("experts") != -1:
            n_experts = self.hparams.get("num_experts", self.hparams.get("moe_num_primary_experts"))
            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for w_name in ["down", "gate", "up"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.{w_name}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)
                    merged_name = f"model.layers.{bid}.block_sparse_moe.experts.{w_name}.weight"
                    new_name = self.map_tensor_name(merged_name)
                    tensors.append((new_name, data_torch))
                return tensors
            else:
                return []
        return [(self.map_tensor_name(name), data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")
@ModelBase.register("ApertusForCausalLM")
class ApertusModel(LlamaModel):
    model_arch = gguf.MODEL_ARCH.APERTUS
    undo_permute = False

    _alpha_n = {}
    _alpha_p = {}
    _beta = {}
    _eps = {}

    def modify_tensors(self, data_torch, name, bid):
        # Handle xIELU activation parameters
        n_layers = self.hparams["num_hidden_layers"]

        if name.endswith(".act_fn.alpha_n"):
            self._alpha_n[bid] = data_torch.to("cpu").float().item()
            if len(self._alpha_n) == n_layers:
                self.gguf_writer.add_xielu_alpha_n([self._alpha_n[k] for k in sorted(self._alpha_n)])
            return []
        if name.endswith(".act_fn.alpha_p"):
            self._alpha_p[bid] = data_torch.to("cpu").float().item()
            if len(self._alpha_p) == n_layers:
                self.gguf_writer.add_xielu_alpha_p([self._alpha_p[k] for k in sorted(self._alpha_p)])
            return []
        if name.endswith(".act_fn.beta"):
            self._beta[bid] = data_torch.to("cpu").float().item()
            if len(self._beta) == n_layers:
                self.gguf_writer.add_xielu_beta([self._beta[k] for k in sorted(self._beta)])
            return []
        if name.endswith(".act_fn.eps"):
            self._eps[bid] = data_torch.to("cpu").float().item()
            if len(self._eps) == n_layers:
                self.gguf_writer.add_xielu_eps([self._eps[k] for k in sorted(self._eps)])
            return []

        return super().modify_tensors(data_torch, name, bid)
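

# Models stored in the native Mistral format (rather than the HF transformers
# layout) are converted through the LLAMA arch. The community chat template is
# resolved from the mistral-common tokenizer version: older versions map to
# named built-in templates, newer ones to .jinja files shipped in the
# templates directory.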
class MistralModel(LlamaModel):
    model_arch = gguf.MODEL_ARCH.LLAMA
    model_name = "Mistral"
    hf_arch = ""
    is_mistral_format = True
    undo_permute = False

    @staticmethod
    def get_community_chat_template(vocab: MistralVocab, templates_dir: Path, is_mistral_format: bool):
        assert TokenizerVersion is not None and Tekkenizer is not None and SentencePieceTokenizer is not None, _mistral_import_error_msg
        assert isinstance(vocab.tokenizer, (Tekkenizer, SentencePieceTokenizer)), (
            f"Expected Tekkenizer or SentencePieceTokenizer, got {type(vocab.tokenizer)}"
        )

        if vocab.tokenizer.version == TokenizerVersion.v1:
            return "mistral-v1"
        elif vocab.tokenizer.version == TokenizerVersion.v3 and vocab.tokenizer_type == MistralTokenizerType.spm:
            return "mistral-v3"
        elif vocab.tokenizer.version == TokenizerVersion.v3 and vocab.tokenizer_type == MistralTokenizerType.tekken:
            return "mistral-v3-tekken"
        elif vocab.tokenizer.version == TokenizerVersion.v7 and vocab.tokenizer_type == MistralTokenizerType.spm:
            return "mistral-v7"
        elif vocab.tokenizer.version == TokenizerVersion.v7 and vocab.tokenizer_type == MistralTokenizerType.tekken:
            return "mistral-v7-tekken"
        elif vocab.tokenizer.version == TokenizerVersion.v11:
            template_file = "Mistral-Small-3.2-24B-Instruct-2506.jinja"
        elif vocab.tokenizer.version == TokenizerVersion.v13:
            template_file = "unsloth-mistral-Devstral-Small-2507.jinja"
        else:
            err_message = f"Unknown tokenizer type: {vocab.tokenizer_type} and version {vocab.tokenizer.version}"
            if is_mistral_format:
                err_message += (
                    ". Please pass the --disable-mistral-community-chat-template argument to the CLI "
                    "if you want to skip this error and use the official Mistral `mistral-common` pre-processing library."
                )
            raise ValueError(err_message)

        template_path = templates_dir / template_file
        if not template_path.exists():
            raise FileNotFoundError(f"Template file not found: {template_path}")

        with open(template_path, "r", encoding="utf-8") as f:
            template = f.read()

        return template
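

# The Pixtral vision-language adapter weights (w_in/w_out) are mapped onto the
# generic mm.1/mm.2 multimodal projector tensor names; see map_tensor_name
# below.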
class PixtralModel(LlavaVisionModel):
    model_name = "Pixtral"
    hf_arch = ""
    is_mistral_format = True

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.PIXTRAL)
        self.gguf_writer.add_vision_attention_layernorm_eps(
            self.find_hparam(["norm_eps"])
        )
        self.gguf_writer.add_rope_freq_base(self.find_vparam(["rope_theta"]))
        self.gguf_writer.add_vision_use_silu(True)

        # spatial_merge_size
        if self.find_vparam(["mm_projector_id"]) == "patch_merge":
            self.gguf_writer.add_vision_spatial_merge_size(
                self.find_vparam(["spatial_merge_size"])
            )

    def map_tensor_name(self, name: str, try_suffixes: Sequence[str] = (".weight", ".bias")) -> str:
        if name == "vision_language_adapter.w_in.weight":
            return "mm.1.weight"
        elif name == "vision_language_adapter.w_out.weight":
            return "mm.2.weight"
        return super().map_tensor_name(name, try_suffixes)
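

# Kimi-VL vision tower: modify_tensors splits the fused wqkv projection back
# into separate q/k/v tensors, and flattens the learned position embedding
# from 3D to 2D by merging its first two dimensions.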
@ModelBase.register("KimiVLForConditionalGeneration")
class KimiVLModel(MmprojModel):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        assert self.hparams_vision is not None
        self.hparams_vision["image_size"] = 64 * 14  # for compatibility

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.KIMIVL)
        self.gguf_writer.add_vision_use_gelu(True)
        self.gguf_writer.add_vision_projector_scale_factor(2)
        # eps is the same as pytorch's default value
        assert self.hparams_vision is not None
        self.gguf_writer.add_vision_attention_layernorm_eps(self.hparams_vision.get("layer_norm_eps", 1e-5))

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused
        is_vision_tensor = "vision_tower" in name or "multi_modal_projector" in name

        if is_vision_tensor:
            if "pos_emb.weight" in name:
                data_torch = data_torch.view(data_torch.shape[0] * data_torch.shape[1], data_torch.shape[2])
            elif "wqkv" in name:
                split_dim = 0 if "weight" in name else -1
                wq, wk, wv = data_torch.chunk(3, dim=split_dim)
                return [
                    (self.map_tensor_name(name.replace("wqkv", "wq")), wq),
                    (self.map_tensor_name(name.replace("wqkv", "wk")), wk),
                    (self.map_tensor_name(name.replace("wqkv", "wv")), wv),
                ]

            return [(self.map_tensor_name(name), data_torch)]

        return []  # skip other tensors


###### CONVERSION LOGIC ######


# tree of lazy tensors
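# LazyTorchTensor wraps a tensor source (a safetensors slice, a remote tensor,
# or another lazy tensor) behind a zero-cost "meta" tensor that carries only
# dtype and shape. Torch ops applied to it are recorded via __torch_function__
# and replayed when the data is finally materialized, which keeps peak memory
# low during conversion. Illustrative sketch (st_slice is a placeholder for a
# safetensors slice object):
#
#   lazy = LazyTorchTensor.from_safetensors_slice(st_slice)
#   lazy = lazy.float().T    # recorded lazily, nothing is loaded yet
#   arr = lazy.numpy()       # still lazy: returns a gguf.LazyNumpyTensor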
class LazyTorchTensor(gguf.LazyBase):
    _tensor_type = torch.Tensor

    # to keep the type-checker happy
    dtype: torch.dtype
    shape: torch.Size

    # only used when converting a torch.Tensor to a np.ndarray
    _dtype_map: dict[torch.dtype, type] = {
        torch.float16: np.float16,
        torch.float32: np.float32,
        torch.uint8: np.uint8,
    }

    # used for safetensors slices
    # ref: https://github.com/huggingface/safetensors/blob/079781fd0dc455ba0fe851e2b4507c33d0c0d407/bindings/python/src/lib.rs#L1046
    # TODO: uncomment U64, U32, and U16, ref: https://github.com/pytorch/pytorch/issues/58734
    _dtype_str_map: dict[str, torch.dtype] = {
        "F64": torch.float64,
        "F32": torch.float32,
        "BF16": torch.bfloat16,
        "F16": torch.float16,
        # "U64": torch.uint64,
        "I64": torch.int64,
        # "U32": torch.uint32,
        "I32": torch.int32,
        # "U16": torch.uint16,
        "I16": torch.int16,
        "U8": torch.uint8,
        "I8": torch.int8,
        "BOOL": torch.bool,
        "F8_E4M3": torch.float8_e4m3fn,
        "F8_E5M2": torch.float8_e5m2,
    }

    def numpy(self) -> gguf.LazyNumpyTensor:
        dtype = self._dtype_map[self.dtype]
        return gguf.LazyNumpyTensor(
            meta=gguf.LazyNumpyTensor.meta_with_dtype_and_shape(dtype, self.shape),
            args=(self,),
            func=(lambda s: s.numpy())
        )

    @classmethod
    def meta_with_dtype_and_shape(cls, dtype: torch.dtype, shape: tuple[int, ...]) -> Tensor:
        return torch.empty(size=shape, dtype=dtype, device="meta")

    @classmethod
    def from_safetensors_slice(cls, st_slice: Any) -> Tensor:
        dtype = cls._dtype_str_map[st_slice.get_dtype()]
        shape: tuple[int, ...] = tuple(st_slice.get_shape())
        lazy = cls(meta=cls.meta_with_dtype_and_shape(dtype, shape), args=(st_slice,), func=lambda s: s[...] if len(s.get_shape()) == 0 else s[:])
        return cast(torch.Tensor, lazy)

    @classmethod
    def from_remote_tensor(cls, remote_tensor: gguf.utility.RemoteTensor):
        dtype = cls._dtype_str_map[remote_tensor.dtype]
        shape = remote_tensor.shape
        meta = cls.meta_with_dtype_and_shape(dtype, shape)
        lazy = cls(meta=meta, args=(remote_tensor,), func=lambda r: torch.frombuffer(r.data(), dtype=dtype).reshape(shape))
        return cast(torch.Tensor, lazy)

    @classmethod
    def __torch_function__(cls, func, types, args=(), kwargs=None):
        del types  # unused

        if kwargs is None:
            kwargs = {}

        if func is torch.Tensor.numpy:
            return args[0].numpy()

        return cls._wrap_fn(func)(*args, **kwargs)


def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(
        description="Convert a huggingface model to a GGML compatible file")
    parser.add_argument(
        "--vocab-only", action="store_true",
        help="extract only the vocab",
    )
    parser.add_argument(
        "--outfile", type=Path,
        help="path to write to; default: based on input. {ftype} will be replaced by the outtype.",
    )
    parser.add_argument(
        "--outtype", type=str, choices=["f32", "f16", "bf16", "q8_0", "tq1_0", "tq2_0", "auto"], default="f16",
        help="output format - use f32 for float32, f16 for float16, bf16 for bfloat16, q8_0 for Q8_0, tq1_0 or tq2_0 for ternary, and auto for the highest-fidelity 16-bit float type depending on the first loaded tensor type",
    )
    parser.add_argument(
        "--bigendian", action="store_true",
        help="model is executed on big endian machine",
    )
    parser.add_argument(
        "model", type=str,
        help="directory containing model file or huggingface repository ID (if --remote)",
        nargs="?",
    )
    parser.add_argument(
        "--use-temp-file", action="store_true",
        help="use the tempfile library while processing (helpful when running out of memory, process killed)",
    )
    parser.add_argument(
        "--no-lazy", action="store_true",
        help="use more RAM by computing all outputs before writing (use in case lazy evaluation is broken)",
    )
    parser.add_argument(
        "--model-name", type=str, default=None,
        help="name of the model",
    )
    parser.add_argument(
        "--verbose", action="store_true",
        help="increase output verbosity",
    )
    parser.add_argument(
        "--split-max-tensors", type=int, default=0,
        help="max tensors in each split",
    )
    parser.add_argument(
        "--split-max-size", type=str, default="0",
        help="max size per split N(K|M|G)",
    )
    parser.add_argument(
        "--dry-run", action="store_true",
        help="only print out a split plan and exit, without writing any new files",
    )
    parser.add_argument(
        "--no-tensor-first-split", action="store_true",
        help="do not add tensors to the first split (disabled by default)"
    )
    parser.add_argument(
        "--metadata", type=Path,
        help="Specify the path for an authorship metadata override file"
    )
    parser.add_argument(
        "--print-supported-models", action="store_true",
        help="Print the supported models"
    )
    parser.add_argument(
        "--remote", action="store_true",
        help="(Experimental) Read safetensors file remotely without downloading to disk. Config and tokenizer files will still be downloaded. To use this feature, you need to specify a Hugging Face model repo name instead of a local directory. For example: 'HuggingFaceTB/SmolLM2-1.7B-Instruct'. Note: to access a gated repo, set the HF_TOKEN environment variable to your Hugging Face token.",
    )
    parser.add_argument(
        "--mmproj", action="store_true",
        help="(Experimental) Export multimodal projector (mmproj) for vision models. This will only work on some vision models. A prefix 'mmproj-' will be added to the output file name.",
    )
    parser.add_argument(
        "--mistral-format", action="store_true",
        help="Whether the model is stored following the Mistral format.",
    )
    parser.add_argument(
        "--disable-mistral-community-chat-template", action="store_true",
        help=(
            "Whether to disable usage of Mistral community chat templates. If set, use the official `mistral-common` library for tokenization and detokenization of Mistral models. "
            "Using `mistral-common` ensures correctness and zero-day support of tokenization for models converted from the Mistral format, but requires manually setting up the tokenization server."
        )
    )
    parser.add_argument(
        "--sentence-transformers-dense-modules", action="store_true",
        help=("Whether to include sentence-transformers dense modules. "
              "It can be used for sentence-transformers models, like google/embeddinggemma-300m. "
              "By default, these modules are not included.")
    )

    args = parser.parse_args()
    if not args.print_supported_models and args.model is None:
        parser.error("the following arguments are required: model")
    return args
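

# Example invocations (illustrative; local paths are placeholders, the repo
# name is the one used in the --remote help text above):
#   python convert_hf_to_gguf.py ./My-Model --outtype q8_0
#   python convert_hf_to_gguf.py HuggingFaceTB/SmolLM2-1.7B-Instruct --remote
#   python convert_hf_to_gguf.py ./My-Model --split-max-size 2G --dry-run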


def split_str_to_n_bytes(split_str: str) -> int:
    if split_str.endswith("K"):
        n = int(split_str[:-1]) * 1000
    elif split_str.endswith("M"):
        n = int(split_str[:-1]) * 1000 * 1000
    elif split_str.endswith("G"):
        n = int(split_str[:-1]) * 1000 * 1000 * 1000
    elif split_str.isnumeric():
        n = int(split_str)
    else:
        raise ValueError(f"Invalid split size: {split_str}, must be a number, optionally followed by K, M, or G")

    if n < 0:
        raise ValueError(f"Invalid split size: {split_str}, must be non-negative")

    return n
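

# Note: split sizes use decimal (SI) multipliers, not binary ones, e.g.:
#   split_str_to_n_bytes("250K") == 250_000
#   split_str_to_n_bytes("2G")   == 2_000_000_000
#   split_str_to_n_bytes("0")    == 0   # splitting disabled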


def get_model_architecture(hparams: dict[str, Any], model_type: ModelType) -> str:
    # TODO @ngxson : this won't work correctly if the model has both audio & vision encoders
    # maybe we should fallback to text model's arch in that case, since not many models have both
    text_config = hparams.get("text_config", {})
    vision_config = hparams.get("vision_config", {})
    arch = None

    if (arches := hparams.get("architectures")) is not None and len(arches) > 0:
        arch = arches[0]
    elif "ssm_cfg" in hparams:
        # For non-hf Mamba and Mamba2 models
        arch = hparams["ssm_cfg"].get("layer", "Mamba") + "ForCausalLM"

    # if "architectures" is found in the sub-config, use that instead
    if model_type == ModelType.TEXT and text_config.get("architectures") is not None:
        arch = text_config["architectures"][0]
    elif model_type == ModelType.MMPROJ and vision_config.get("architectures") is not None:
        arch = vision_config["architectures"][0]

    if arch is None:
        raise ValueError("Failed to detect model architecture")

    return arch
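

# For a typical HF text model this resolves to hparams["architectures"][0]
# (e.g. "LlamaForCausalLM"); when a matching sub-config ("text_config" or
# "vision_config") declares its own "architectures", that entry takes
# precedence for the corresponding model type.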


def main() -> None:
    args = parse_args()

    if args.print_supported_models:
        logger.error("Supported models:")
        ModelBase.print_registered_models()
        sys.exit(0)

    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)

    if args.remote:
        hf_repo_id = args.model
        from huggingface_hub import snapshot_download
        allowed_patterns = ["LICENSE", "*.json", "*.md", "*.txt", "tokenizer.model"]
        if args.sentence_transformers_dense_modules:
            # include sentence-transformers dense modules safetensors files
            allowed_patterns.append("*.safetensors")
        local_dir = snapshot_download(
            repo_id=hf_repo_id,
            allow_patterns=allowed_patterns)
        dir_model = Path(local_dir)
        logger.info(f"Downloaded config and tokenizer to {local_dir}")
    else:
        hf_repo_id = None
        dir_model = Path(args.model)

    if not dir_model.is_dir():
        logger.error(f'Error: {dir_model} is not a directory')
        sys.exit(1)

    ftype_map: dict[str, gguf.LlamaFileType] = {
        "f32": gguf.LlamaFileType.ALL_F32,
        "f16": gguf.LlamaFileType.MOSTLY_F16,
        "bf16": gguf.LlamaFileType.MOSTLY_BF16,
        "q8_0": gguf.LlamaFileType.MOSTLY_Q8_0,
        "tq1_0": gguf.LlamaFileType.MOSTLY_TQ1_0,
        "tq2_0": gguf.LlamaFileType.MOSTLY_TQ2_0,
        "auto": gguf.LlamaFileType.GUESSED,
    }

    is_split = args.split_max_tensors > 0 or args.split_max_size != "0"
    if args.use_temp_file and is_split:
        logger.error("Error: Cannot use temp file when splitting")
        sys.exit(1)

    if args.outfile is not None:
        fname_out = args.outfile
    elif hf_repo_id:
        # if remote, use the model ID as the output file name
        fname_out = Path("./" + hf_repo_id.replace("/", "-") + "-{ftype}.gguf")
    else:
        fname_out = dir_model

    logger.info(f"Loading model: {dir_model.name}")

    is_mistral_format = args.mistral_format
    if is_mistral_format and not _mistral_common_installed:
        raise ImportError(_mistral_import_error_msg)
    disable_mistral_community_chat_template = args.disable_mistral_community_chat_template

    with torch.inference_mode():
        output_type = ftype_map[args.outtype]
        model_type = ModelType.MMPROJ if args.mmproj else ModelType.TEXT
        hparams = ModelBase.load_hparams(dir_model, is_mistral_format)
        if not is_mistral_format:
            model_architecture = get_model_architecture(hparams, model_type)
            logger.info(f"Model architecture: {model_architecture}")
            try:
                model_class = ModelBase.from_model_architecture(model_architecture, model_type=model_type)
            except NotImplementedError:
                logger.error(f"Model {model_architecture} is not supported")
                sys.exit(1)
        elif args.mmproj:
            assert hparams.get("vision_encoder") is not None, "This model does not support multimodal"
            model_class = PixtralModel
        else:
            model_class = MistralModel

        model_instance = model_class(dir_model, output_type, fname_out,
                                     is_big_endian=args.bigendian, use_temp_file=args.use_temp_file,
                                     eager=args.no_lazy,
                                     metadata_override=args.metadata, model_name=args.model_name,
                                     split_max_tensors=args.split_max_tensors,
                                     split_max_size=split_str_to_n_bytes(args.split_max_size), dry_run=args.dry_run,
                                     small_first_shard=args.no_tensor_first_split,
                                     remote_hf_model_id=hf_repo_id, disable_mistral_community_chat_template=disable_mistral_community_chat_template,
                                     sentence_transformers_dense_modules=args.sentence_transformers_dense_modules
                                     )

        if args.vocab_only:
            logger.info("Exporting model vocab...")
            model_instance.write_vocab()
            logger.info(f"Model vocab successfully exported to {model_instance.fname_out}")
        else:
            logger.info("Exporting model...")
            model_instance.write()
            out_path = f"{model_instance.fname_out.parent}{os.sep}" if is_split else model_instance.fname_out
            logger.info(f"Model successfully exported to {out_path}")


if __name__ == '__main__':
    main()