1
0

convert_hf_to_gguf.py 283 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223222422252226222722282229223022312232223322342235223622372238223922402241224222432244224522462247224822492250225122522253225422552256225722582259226022612262226322642265226622672268226922702271227222732274227522762277227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532253325342535253625372538253925402541254225432544254525462547254825492550255125522553255425552556255725582559256025612562256325642565256625672568256925702571257225732574257525762577257825792580258125822583258425852586258725882589259025912592259325942595259625972598259926002601260226032604260526062607260826092610261126122613261426152616261726182619262026212622262326242625262626272628262926302631263226332634263526362637263826392640264126422643264426452646264726482649265026512652265326542655265626572658265926602661266226632664266526662667266826692670267126722673267426752676267726782679268026812682268326842685268626872688268926902691269226932694269526962697269826992700270127022703270427052706270727082709271027112712271327142715271627172718271927202721272227232724272527262727272827292730273127322733273427352736273727382739274027412742274327442745274627472748274927502751275227532754275527562757275827592760276127622763276427652766276727682769277027712772277327742775277627772778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327732783279328032813282328332843285328632873288328932903291329232933294329532963297329832993300330133023303330433053306330733083309331033113312331333143315331633173318331933203321332233233324332533263327332833293330333133323333333433353336333733383339334033413342334333443345334633473348334933503351335233533354335533563357335833593360336133623363336433653366336733683369337033713372337333743375337633773378337933803381338233833384338533863387338833893390339133923393339433953396339733983399340034013402340334043405340634073408340934103411341234133414341534163417341834193420342134223423342434253426342734283429343034313432343334343435343634373438343934403441344234433444344534463447344834493450345134523453345434553456345734583459346034613462346334643465346634673468346934703471347234733474347534763477347834793480348134823483348434853486348734883489349034913492349334943495349634973498349935003501350235033504350535063507350835093510351135123513351435153516351735183519352035213522352335243525352635273528352935303531353235333534353535363537353835393540354135423543354435453546354735483549355035513552355335543555355635573558355935603561356235633564356535663567356835693570357135723573357435753576357735783579358035813582358335843585358635873588358935903591359235933594359535963597359835993600360136023603360436053606360736083609361036113612361336143615361636173618361936203621362236233624362536263627362836293630363136323633363436353636363736383639364036413642364336443645364636473648364936503651365236533654365536563657365836593660366136623663366436653666366736683669367036713672367336743675367636773678367936803681368236833684368536863687368836893690369136923693369436953696369736983699370037013702370337043705370637073708370937103711371237133714371537163717371837193720372137223723372437253726372737283729373037313732373337343735373637373738373937403741374237433744374537463747374837493750375137523753375437553756375737583759376037613762376337643765376637673768376937703771377237733774377537763777377837793780378137823783378437853786378737883789379037913792379337943795379637973798379938003801380238033804380538063807380838093810381138123813381438153816381738183819382038213822382338243825382638273828382938303831383238333834383538363837383838393840384138423843384438453846384738483849385038513852385338543855385638573858385938603861386238633864386538663867386838693870387138723873387438753876387738783879388038813882388338843885388638873888388938903891389238933894389538963897389838993900390139023903390439053906390739083909391039113912391339143915391639173918391939203921392239233924392539263927392839293930393139323933393439353936393739383939394039413942394339443945394639473948394939503951395239533954395539563957395839593960396139623963396439653966396739683969397039713972397339743975397639773978397939803981398239833984398539863987398839893990399139923993399439953996399739983999400040014002400340044005400640074008400940104011401240134014401540164017401840194020402140224023402440254026402740284029403040314032403340344035403640374038403940404041404240434044404540464047404840494050405140524053405440554056405740584059406040614062406340644065406640674068406940704071407240734074407540764077407840794080408140824083408440854086408740884089409040914092409340944095409640974098409941004101410241034104410541064107410841094110411141124113411441154116411741184119412041214122412341244125412641274128412941304131413241334134413541364137413841394140414141424143414441454146414741484149415041514152415341544155415641574158415941604161416241634164416541664167416841694170417141724173417441754176417741784179418041814182418341844185418641874188418941904191419241934194419541964197419841994200420142024203420442054206420742084209421042114212421342144215421642174218421942204221422242234224422542264227422842294230423142324233423442354236423742384239424042414242424342444245424642474248424942504251425242534254425542564257425842594260426142624263426442654266426742684269427042714272427342744275427642774278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477747784779478047814782478347844785478647874788478947904791479247934794479547964797479847994800480148024803480448054806480748084809481048114812481348144815481648174818481948204821482248234824482548264827482848294830483148324833483448354836483748384839484048414842484348444845484648474848484948504851485248534854485548564857485848594860486148624863486448654866486748684869487048714872487348744875487648774878487948804881488248834884488548864887488848894890489148924893489448954896489748984899490049014902490349044905490649074908490949104911491249134914491549164917491849194920492149224923492449254926492749284929493049314932493349344935493649374938493949404941494249434944494549464947494849494950495149524953495449554956495749584959496049614962496349644965496649674968496949704971497249734974497549764977497849794980498149824983498449854986498749884989499049914992499349944995499649974998499950005001500250035004500550065007500850095010501150125013501450155016501750185019502050215022502350245025502650275028502950305031503250335034503550365037503850395040504150425043504450455046504750485049505050515052505350545055505650575058505950605061506250635064506550665067506850695070507150725073507450755076507750785079508050815082508350845085508650875088508950905091509250935094509550965097509850995100510151025103510451055106510751085109511051115112511351145115511651175118511951205121512251235124512551265127512851295130513151325133513451355136513751385139514051415142514351445145514651475148514951505151515251535154515551565157515851595160516151625163516451655166516751685169517051715172517351745175517651775178517951805181518251835184518551865187518851895190519151925193519451955196519751985199520052015202520352045205520652075208520952105211521252135214521552165217521852195220522152225223522452255226522752285229523052315232523352345235523652375238523952405241524252435244524552465247524852495250525152525253525452555256525752585259526052615262526352645265526652675268526952705271527252735274527552765277527852795280528152825283528452855286528752885289529052915292529352945295529652975298529953005301530253035304530553065307530853095310531153125313531453155316531753185319532053215322532353245325532653275328532953305331533253335334533553365337533853395340534153425343534453455346534753485349535053515352535353545355535653575358535953605361536253635364536553665367536853695370537153725373537453755376537753785379538053815382538353845385538653875388538953905391539253935394539553965397539853995400540154025403540454055406540754085409541054115412541354145415541654175418541954205421542254235424542554265427542854295430543154325433543454355436543754385439544054415442544354445445544654475448544954505451545254535454545554565457545854595460546154625463546454655466546754685469547054715472547354745475547654775478547954805481548254835484548554865487548854895490549154925493549454955496549754985499550055015502550355045505550655075508550955105511551255135514551555165517551855195520552155225523552455255526552755285529553055315532553355345535553655375538553955405541554255435544554555465547554855495550555155525553555455555556555755585559556055615562556355645565556655675568556955705571557255735574557555765577557855795580558155825583558455855586558755885589559055915592559355945595559655975598559956005601560256035604560556065607560856095610561156125613561456155616561756185619562056215622562356245625562656275628562956305631563256335634563556365637563856395640564156425643564456455646564756485649565056515652565356545655565656575658565956605661566256635664566556665667566856695670567156725673567456755676567756785679568056815682568356845685568656875688568956905691569256935694569556965697569856995700570157025703570457055706570757085709571057115712571357145715571657175718571957205721572257235724572557265727572857295730573157325733573457355736573757385739574057415742574357445745574657475748574957505751575257535754575557565757575857595760576157625763576457655766576757685769577057715772577357745775577657775778577957805781578257835784578557865787578857895790579157925793579457955796579757985799580058015802580358045805580658075808580958105811581258135814581558165817581858195820582158225823582458255826582758285829583058315832583358345835583658375838583958405841584258435844584558465847584858495850585158525853585458555856585758585859586058615862586358645865586658675868586958705871587258735874587558765877587858795880588158825883588458855886588758885889589058915892589358945895589658975898589959005901590259035904590559065907590859095910591159125913591459155916591759185919592059215922592359245925592659275928592959305931593259335934593559365937593859395940594159425943594459455946594759485949595059515952595359545955595659575958595959605961596259635964596559665967596859695970597159725973597459755976597759785979598059815982598359845985598659875988598959905991599259935994599559965997599859996000600160026003600460056006600760086009601060116012601360146015601660176018601960206021602260236024602560266027602860296030603160326033603460356036603760386039604060416042604360446045604660476048604960506051605260536054605560566057605860596060606160626063606460656066606760686069607060716072607360746075607660776078607960806081608260836084608560866087608860896090609160926093609460956096609760986099610061016102610361046105610661076108610961106111611261136114611561166117611861196120612161226123612461256126612761286129613061316132613361346135613661376138613961406141614261436144614561466147614861496150615161526153615461556156615761586159616061616162616361646165616661676168616961706171617261736174617561766177617861796180618161826183618461856186618761886189619061916192619361946195619661976198619962006201620262036204620562066207620862096210621162126213621462156216
  1. #!/usr/bin/env python3
  2. # -*- coding: utf-8 -*-
  3. from __future__ import annotations
  4. import ast
  5. import logging
  6. import argparse
  7. import contextlib
  8. import json
  9. import os
  10. import re
  11. import sys
  12. from enum import IntEnum
  13. from pathlib import Path
  14. from hashlib import sha256
  15. from typing import TYPE_CHECKING, Any, Callable, ContextManager, Iterable, Iterator, Literal, Sequence, TypeVar, cast
  16. from itertools import chain
  17. from transformers import AutoConfig
  18. import math
  19. import numpy as np
  20. import torch
  21. if TYPE_CHECKING:
  22. from torch import Tensor
  23. if 'NO_LOCAL_GGUF' not in os.environ:
  24. sys.path.insert(1, str(Path(__file__).parent / 'gguf-py'))
  25. import gguf
  26. logger = logging.getLogger("hf-to-gguf")
  27. ###### MODEL DEFINITIONS ######
  28. class SentencePieceTokenTypes(IntEnum):
  29. NORMAL = 1
  30. UNKNOWN = 2
  31. CONTROL = 3
  32. USER_DEFINED = 4
  33. UNUSED = 5
  34. BYTE = 6
  35. class ModelType(IntEnum):
  36. TEXT = 1
  37. VISION = 2
  38. AnyModel = TypeVar("AnyModel", bound="type[ModelBase]")
  39. class ModelBase:
  40. _model_classes: dict[ModelType, dict[str, type[ModelBase]]] = {
  41. ModelType.TEXT: {},
  42. ModelType.VISION: {},
  43. }
  44. dir_model: Path
  45. ftype: gguf.LlamaFileType
  46. fname_out: Path
  47. is_big_endian: bool
  48. endianess: gguf.GGUFEndian
  49. use_temp_file: bool
  50. lazy: bool
  51. part_names: list[str]
  52. is_safetensors: bool
  53. hparams: dict[str, Any]
  54. tensor_names: set[str] | None
  55. gguf_writer: gguf.GGUFWriter
  56. model_name: str | None
  57. metadata_override: Path | None
  58. dir_model_card: Path
  59. remote_hf_model_id: str | None
  60. # subclasses should define this!
  61. model_arch: gguf.MODEL_ARCH
  62. # subclasses should initialize this!
  63. block_count: int
  64. tensor_map: gguf.TensorNameMap
  65. def __init__(self, dir_model: Path, ftype: gguf.LlamaFileType, fname_out: Path, *, is_big_endian: bool = False,
  66. use_temp_file: bool = False, eager: bool = False,
  67. metadata_override: Path | None = None, model_name: str | None = None,
  68. split_max_tensors: int = 0, split_max_size: int = 0, dry_run: bool = False,
  69. small_first_shard: bool = False, hparams: dict[str, Any] | None = None, remote_hf_model_id: str | None = None):
  70. if type(self) is ModelBase or \
  71. type(self) is TextModel or \
  72. type(self) is VisionModel:
  73. raise TypeError(f"{type(self).__name__!r} should not be directly instantiated")
  74. self.dir_model = dir_model
  75. self.ftype = ftype
  76. self.fname_out = fname_out
  77. self.is_big_endian = is_big_endian
  78. self.endianess = gguf.GGUFEndian.BIG if is_big_endian else gguf.GGUFEndian.LITTLE
  79. self.use_temp_file = use_temp_file
  80. self.lazy = not eager or (remote_hf_model_id is not None)
  81. self.remote_hf_model_id = remote_hf_model_id
  82. if remote_hf_model_id is not None:
  83. self.is_safetensors = True
  84. def get_remote_tensors() -> Iterator[tuple[str, Tensor]]:
  85. logger.info(f"Using remote model with HuggingFace id: {remote_hf_model_id}")
  86. remote_tensors = gguf.utility.SafetensorRemote.get_list_tensors_hf_model(remote_hf_model_id)
  87. self.tensor_names = set(name for name in remote_tensors.keys())
  88. for name, remote_tensor in gguf.utility.SafetensorRemote.get_list_tensors_hf_model(remote_hf_model_id).items():
  89. yield (name, LazyTorchTensor.from_remote_tensor(remote_tensor))
  90. self.get_tensors = get_remote_tensors
  91. else:
  92. self.part_names = ModelBase.get_model_part_names(self.dir_model, "model", ".safetensors")
  93. self.is_safetensors = len(self.part_names) > 0
  94. if not self.is_safetensors:
  95. self.part_names = ModelBase.get_model_part_names(self.dir_model, "pytorch_model", ".bin")
  96. self.hparams = ModelBase.load_hparams(self.dir_model) if hparams is None else hparams
  97. self.tensor_names = None
  98. self.metadata_override = metadata_override
  99. self.model_name = model_name
  100. self.dir_model_card = dir_model # overridden in convert_lora_to_gguf.py
  101. # Apply heuristics to figure out typical tensor encoding based on first layer tensor encoding type
  102. if self.ftype == gguf.LlamaFileType.GUESSED:
  103. # NOTE: can't use field "torch_dtype" in config.json, because some finetunes lie.
  104. _, first_tensor = next(self.get_tensors())
  105. if first_tensor.dtype == torch.float16:
  106. logger.info(f"choosing --outtype f16 from first tensor type ({first_tensor.dtype})")
  107. self.ftype = gguf.LlamaFileType.MOSTLY_F16
  108. else:
  109. logger.info(f"choosing --outtype bf16 from first tensor type ({first_tensor.dtype})")
  110. self.ftype = gguf.LlamaFileType.MOSTLY_BF16
  111. # Configure GGUF Writer
  112. self.gguf_writer = gguf.GGUFWriter(path=None, arch=gguf.MODEL_ARCH_NAMES[self.model_arch], endianess=self.endianess, use_temp_file=self.use_temp_file,
  113. split_max_tensors=split_max_tensors, split_max_size=split_max_size, dry_run=dry_run, small_first_shard=small_first_shard)
  114. @classmethod
  115. def add_prefix_to_filename(cls, path: Path, prefix: str) -> Path:
  116. stem, suffix = path.stem, path.suffix
  117. new_name = f"{prefix}{stem}{suffix}"
  118. return path.with_name(new_name)
  119. def find_hparam(self, keys: Iterable[str], optional: bool = False) -> Any:
  120. key = next((k for k in keys if k in self.hparams), None)
  121. if key is not None:
  122. return self.hparams[key]
  123. if optional:
  124. return None
  125. raise KeyError(f"could not find any of: {keys}")
  126. def get_tensors(self) -> Iterator[tuple[str, Tensor]]:
  127. tensor_names_from_parts: set[str] = set()
  128. index_name = "model.safetensors" if self.is_safetensors else "pytorch_model.bin"
  129. index_name += ".index.json"
  130. index_file = self.dir_model / index_name
  131. if index_file.is_file():
  132. self.tensor_names = set()
  133. logger.info(f"gguf: loading model weight map from '{index_name}'")
  134. with open(index_file, "r", encoding="utf-8") as f:
  135. index: dict[str, Any] = json.load(f)
  136. weight_map = index.get("weight_map")
  137. if weight_map is None or not isinstance(weight_map, dict):
  138. raise ValueError(f"Can't load 'weight_map' from {index_name!r}")
  139. self.tensor_names.update(weight_map.keys())
  140. else:
  141. self.tensor_names = tensor_names_from_parts
  142. weight_map = {}
  143. for part_name in self.part_names:
  144. logger.info(f"gguf: loading model part '{part_name}'")
  145. ctx: ContextManager[Any]
  146. if self.is_safetensors:
  147. from safetensors import safe_open
  148. ctx = cast(ContextManager[Any], safe_open(self.dir_model / part_name, framework="pt", device="cpu"))
  149. else:
  150. ctx = contextlib.nullcontext(torch.load(str(self.dir_model / part_name), map_location="cpu", mmap=True, weights_only=True))
  151. with ctx as model_part:
  152. tensor_names_from_parts.update(model_part.keys())
  153. for name in model_part.keys():
  154. if self.is_safetensors:
  155. if self.lazy:
  156. data = model_part.get_slice(name)
  157. data = LazyTorchTensor.from_safetensors_slice(data)
  158. else:
  159. data = model_part.get_tensor(name)
  160. else:
  161. data = model_part[name]
  162. if self.lazy:
  163. data = LazyTorchTensor.from_eager(data)
  164. yield name, data
  165. # verify tensor name presence and identify potentially missing files
  166. if len(tensor_names_from_parts.symmetric_difference(self.tensor_names)) > 0:
  167. missing = sorted(self.tensor_names.difference(tensor_names_from_parts))
  168. extra = sorted(tensor_names_from_parts.difference(self.tensor_names))
  169. missing_files = sorted(set(weight_map[n] for n in missing if n in weight_map))
  170. if len(extra) == 0 and len(missing_files) > 0:
  171. raise ValueError(f"Missing or incomplete model files: {missing_files}\n"
  172. f"Missing tensors: {missing}")
  173. else:
  174. raise ValueError("Mismatch between weight map and model parts for tensor names:\n"
  175. f"Missing tensors: {missing}\n"
  176. f"Extra tensors: {extra}")
  177. def format_tensor_name(self, key: gguf.MODEL_TENSOR, bid: int | None = None, suffix: str = ".weight") -> str:
  178. if key not in gguf.MODEL_TENSORS[self.model_arch]:
  179. raise ValueError(f"Missing {key!r} for MODEL_TENSORS of {self.model_arch!r}")
  180. name: str = gguf.TENSOR_NAMES[key]
  181. if "{bid}" in name:
  182. assert bid is not None
  183. name = name.format(bid=bid)
  184. return name + suffix
  185. def match_model_tensor_name(self, name: str, key: gguf.MODEL_TENSOR, bid: int | None, suffix: str = ".weight") -> bool:
  186. if key not in gguf.MODEL_TENSORS[self.model_arch]:
  187. return False
  188. key_name: str = gguf.TENSOR_NAMES[key]
  189. if "{bid}" in key_name:
  190. if bid is None:
  191. return False
  192. key_name = key_name.format(bid=bid)
  193. else:
  194. if bid is not None:
  195. return False
  196. return name == (key_name + suffix)
  197. def map_tensor_name(self, name: str, try_suffixes: Sequence[str] = (".weight", ".bias")) -> str:
  198. new_name = self.tensor_map.get_name(key=name, try_suffixes=try_suffixes)
  199. if new_name is None:
  200. raise ValueError(f"Can not map tensor {name!r}")
  201. return new_name
  202. def set_gguf_parameters(self):
  203. raise NotImplementedError("set_gguf_parameters() must be implemented in subclasses")
  204. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  205. del bid # unused
  206. return [(self.map_tensor_name(name), data_torch)]
  207. def tensor_force_quant(self, name: str, new_name: str, bid: int | None, n_dims: int) -> gguf.GGMLQuantizationType | bool:
  208. del name, new_name, bid, n_dims # unused
  209. return False
  210. # some models need extra generated tensors (like rope_freqs)
  211. def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
  212. return ()
  213. def prepare_tensors(self):
  214. max_name_len = max(len(s) for _, s in self.tensor_map.mapping.values()) + len(".weight,")
  215. for name, data_torch in chain(self.generate_extra_tensors(), self.get_tensors()):
  216. # we don't need these
  217. if name.endswith((".attention.masked_bias", ".attention.bias", ".rotary_emb.inv_freq")):
  218. continue
  219. old_dtype = data_torch.dtype
  220. # convert any unsupported data types to float32
  221. if data_torch.dtype not in (torch.float16, torch.float32):
  222. data_torch = data_torch.to(torch.float32)
  223. # use the first number-like part of the tensor name as the block id
  224. bid = None
  225. for part in name.split("."):
  226. if part.isdecimal():
  227. bid = int(part)
  228. break
  229. for new_name, data_torch in (self.modify_tensors(data_torch, name, bid)):
  230. # TODO: why do we squeeze here?
  231. # data = data_torch.squeeze().numpy()
  232. data = data_torch.numpy()
  233. # if data ends up empty, it means data_torch was a scalar tensor -> restore
  234. if len(data.shape) == 0:
  235. data = data_torch.numpy()
  236. n_dims = len(data.shape)
  237. data_qtype: gguf.GGMLQuantizationType | bool = self.tensor_force_quant(name, new_name, bid, n_dims)
  238. # Most of the codebase that takes in 1D tensors or norms only handles F32 tensors
  239. if n_dims <= 1 or new_name.endswith("_norm.weight"):
  240. data_qtype = gguf.GGMLQuantizationType.F32
  241. # Conditions should closely match those in llama_model_quantize_internal in llama.cpp
  242. # Some tensor types are always in float32
  243. if data_qtype is False and (
  244. any(
  245. self.match_model_tensor_name(new_name, key, bid)
  246. for key in (
  247. gguf.MODEL_TENSOR.FFN_GATE_INP,
  248. gguf.MODEL_TENSOR.POS_EMBD,
  249. gguf.MODEL_TENSOR.TOKEN_TYPES,
  250. gguf.MODEL_TENSOR.SSM_CONV1D,
  251. gguf.MODEL_TENSOR.TIME_MIX_FIRST,
  252. gguf.MODEL_TENSOR.TIME_MIX_W1,
  253. gguf.MODEL_TENSOR.TIME_MIX_W2,
  254. gguf.MODEL_TENSOR.TIME_MIX_DECAY_W1,
  255. gguf.MODEL_TENSOR.TIME_MIX_DECAY_W2,
  256. gguf.MODEL_TENSOR.TIME_MIX_LERP_FUSED,
  257. gguf.MODEL_TENSOR.POSNET_NORM1,
  258. gguf.MODEL_TENSOR.POSNET_NORM2,
  259. )
  260. )
  261. or not new_name.endswith(".weight")
  262. ):
  263. data_qtype = gguf.GGMLQuantizationType.F32
  264. if data_qtype is False and any(
  265. self.match_model_tensor_name(new_name, key, bid)
  266. for key in (
  267. gguf.MODEL_TENSOR.TOKEN_EMBD,
  268. gguf.MODEL_TENSOR.OUTPUT,
  269. )
  270. ):
  271. if self.ftype in (
  272. gguf.LlamaFileType.MOSTLY_TQ1_0,
  273. gguf.LlamaFileType.MOSTLY_TQ2_0,
  274. ):
  275. # TODO: use Q4_K and Q6_K
  276. data_qtype = gguf.GGMLQuantizationType.F16
  277. # No override (data_qtype is False), or wants to be quantized (data_qtype is True)
  278. if isinstance(data_qtype, bool):
  279. if self.ftype == gguf.LlamaFileType.ALL_F32:
  280. data_qtype = gguf.GGMLQuantizationType.F32
  281. elif self.ftype == gguf.LlamaFileType.MOSTLY_F16:
  282. data_qtype = gguf.GGMLQuantizationType.F16
  283. elif self.ftype == gguf.LlamaFileType.MOSTLY_BF16:
  284. data_qtype = gguf.GGMLQuantizationType.BF16
  285. elif self.ftype == gguf.LlamaFileType.MOSTLY_Q8_0:
  286. data_qtype = gguf.GGMLQuantizationType.Q8_0
  287. elif self.ftype == gguf.LlamaFileType.MOSTLY_TQ1_0:
  288. data_qtype = gguf.GGMLQuantizationType.TQ1_0
  289. elif self.ftype == gguf.LlamaFileType.MOSTLY_TQ2_0:
  290. data_qtype = gguf.GGMLQuantizationType.TQ2_0
  291. else:
  292. raise ValueError(f"Unknown file type: {self.ftype.name}")
  293. try:
  294. data = gguf.quants.quantize(data, data_qtype)
  295. except gguf.QuantError as e:
  296. logger.warning("%s, %s", e, "falling back to F16")
  297. data_qtype = gguf.GGMLQuantizationType.F16
  298. data = gguf.quants.quantize(data, data_qtype)
  299. shape = gguf.quant_shape_from_byte_shape(data.shape, data_qtype) if data.dtype == np.uint8 else data.shape
  300. # reverse shape to make it similar to the internal ggml dimension order
  301. shape_str = f"{{{', '.join(str(n) for n in reversed(shape))}}}"
  302. # n_dims is implicit in the shape
  303. logger.info(f"{f'%-{max_name_len}s' % f'{new_name},'} {old_dtype} --> {data_qtype.name}, shape = {shape_str}")
  304. self.gguf_writer.add_tensor(new_name, data, raw_dtype=data_qtype)
  305. def set_type(self):
  306. self.gguf_writer.add_type(gguf.GGUFType.MODEL)
  307. def prepare_metadata(self, vocab_only: bool):
  308. total_params, shared_params, expert_params, expert_count = self.gguf_writer.get_total_parameter_count()
  309. self.metadata = gguf.Metadata.load(self.metadata_override, self.dir_model_card, self.model_name, total_params)
  310. # If we are using HF model id, set the metadata name to the model id
  311. if self.remote_hf_model_id:
  312. self.metadata.name = self.remote_hf_model_id
  313. # Fallback to model directory name if metadata name is still missing
  314. if self.metadata.name is None:
  315. self.metadata.name = self.dir_model.name
  316. # Generate parameter weight class (useful for leader boards) if not yet determined
  317. if self.metadata.size_label is None and total_params > 0:
  318. self.metadata.size_label = gguf.size_label(total_params, shared_params, expert_params, expert_count)
  319. self.set_type()
  320. logger.info("Set meta model")
  321. self.metadata.set_gguf_meta_model(self.gguf_writer)
  322. logger.info("Set model parameters")
  323. self.set_gguf_parameters()
  324. logger.info("Set model quantization version")
  325. self.gguf_writer.add_quantization_version(gguf.GGML_QUANT_VERSION)
  326. def write_vocab(self):
  327. raise NotImplementedError("write_vocab() must be implemented in subclasses")
  328. def write(self):
  329. self.prepare_tensors()
  330. self.prepare_metadata(vocab_only=False)
  331. self.gguf_writer.write_header_to_file(path=self.fname_out)
  332. self.gguf_writer.write_kv_data_to_file()
  333. self.gguf_writer.write_tensors_to_file(progress=True)
  334. self.gguf_writer.close()
  335. @staticmethod
  336. def get_model_part_names(dir_model: Path, prefix: str, suffix: str) -> list[str]:
  337. part_names: list[str] = []
  338. for filename in os.listdir(dir_model):
  339. if filename.startswith(prefix) and filename.endswith(suffix):
  340. part_names.append(filename)
  341. part_names.sort()
  342. return part_names
  343. @staticmethod
  344. def load_hparams(dir_model: Path):
  345. try:
  346. # for security reason, we don't allow loading remote code by default
  347. # if a model need remote code, we will fallback to config.json
  348. return AutoConfig.from_pretrained(dir_model, trust_remote_code=False).to_dict()
  349. except Exception as e:
  350. logger.warning(f"Failed to load model config from {dir_model}: {e}")
  351. logger.warning("Trying to load config.json instead")
  352. with open(dir_model / "config.json", "r", encoding="utf-8") as f:
  353. config = json.load(f)
  354. if "llm_config" in config:
  355. # rename for InternVL
  356. config["text_config"] = config["llm_config"]
  357. return config
  358. @classmethod
  359. def register(cls, *names: str) -> Callable[[AnyModel], AnyModel]:
  360. assert names
  361. def func(modelcls: AnyModel) -> AnyModel:
  362. model_type = ModelType.VISION if modelcls.model_arch == gguf.MODEL_ARCH.CLIP_VISION else ModelType.TEXT
  363. for name in names:
  364. cls._model_classes[model_type][name] = modelcls
  365. return modelcls
  366. return func
  367. @classmethod
  368. def print_registered_models(cls):
  369. for model_type, model_classes in cls._model_classes.items():
  370. logger.error(f"{model_type.name} models:")
  371. for name in sorted(model_classes.keys()):
  372. logger.error(f" - {name}")
  373. @classmethod
  374. def from_model_architecture(cls, arch: str, model_type = ModelType.TEXT) -> type[ModelBase]:
  375. try:
  376. return cls._model_classes[model_type][arch]
  377. except KeyError:
  378. raise NotImplementedError(f'Architecture {arch!r} not supported!') from None
  379. class TextModel(ModelBase):
  380. model_type = ModelType.TEXT
  381. hf_arch: str
  382. def __init__(self, *args, **kwargs):
  383. super().__init__(*args, **kwargs)
  384. self.hf_arch = get_model_architecture(self.hparams, self.model_type)
  385. if "text_config" in self.hparams:
  386. # move the text_config to the root level
  387. self.hparams = {**self.hparams, **self.hparams["text_config"]}
  388. self.block_count = self.find_hparam(["n_layers", "num_hidden_layers", "n_layer", "num_layers"])
  389. self.tensor_map = gguf.get_tensor_name_map(self.model_arch, self.block_count)
  390. @classmethod
  391. def __init_subclass__(cls):
  392. # can't use an abstract property, because overriding it without type errors
  393. # would require using decorated functions instead of simply defining the property
  394. if "model_arch" not in cls.__dict__:
  395. raise TypeError(f"Missing property 'model_arch' for {cls.__name__!r}")
  396. def set_vocab(self):
  397. self._set_vocab_gpt2()
  398. def prepare_metadata(self, vocab_only: bool):
  399. super().prepare_metadata(vocab_only=vocab_only)
  400. total_params = self.gguf_writer.get_total_parameter_count()[0]
  401. # Extract the encoding scheme from the file type name. e.g. 'gguf.LlamaFileType.MOSTLY_Q8_0' --> 'Q8_0'
  402. output_type: str = self.ftype.name.partition("_")[2]
  403. # Filename Output
  404. if self.fname_out.is_dir():
  405. # Generate default filename based on model specification and available metadata
  406. if not vocab_only:
  407. fname_default: str = gguf.naming_convention(self.metadata.name, self.metadata.basename, self.metadata.finetune, self.metadata.version, self.metadata.size_label, output_type, model_type="LoRA" if total_params < 0 else None)
  408. else:
  409. fname_default: str = gguf.naming_convention(self.metadata.name, self.metadata.basename, self.metadata.finetune, self.metadata.version, size_label=None, output_type=None, model_type="vocab")
  410. # Use the default filename
  411. self.fname_out = self.fname_out / f"{fname_default}.gguf"
  412. else:
  413. # Output path is a custom defined templated filename
  414. # Note: `not is_dir()` is used because `.is_file()` will not detect
  415. # file template strings as it doesn't actually exist as a file
  416. # Process templated file name with the output ftype, useful with the "auto" ftype
  417. self.fname_out = self.fname_out.parent / gguf.fill_templated_filename(self.fname_out.name, output_type)
  418. logger.info("Set model tokenizer")
  419. self.set_vocab()
  420. def set_gguf_parameters(self):
  421. self.gguf_writer.add_block_count(self.block_count)
  422. if (n_ctx := self.find_hparam(["max_position_embeddings", "n_ctx", "n_positions"], optional=True)) is not None:
  423. self.gguf_writer.add_context_length(n_ctx)
  424. logger.info(f"gguf: context length = {n_ctx}")
  425. if (n_embd := self.find_hparam(["hidden_size", "n_embd"], optional=True)) is not None:
  426. self.gguf_writer.add_embedding_length(n_embd)
  427. logger.info(f"gguf: embedding length = {n_embd}")
  428. if (n_ff := self.find_hparam(["intermediate_size", "n_inner"], optional=True)) is not None:
  429. self.gguf_writer.add_feed_forward_length(n_ff)
  430. logger.info(f"gguf: feed forward length = {n_ff}")
  431. if (n_head := self.find_hparam(["num_attention_heads", "n_head"], optional=True)) is not None:
  432. self.gguf_writer.add_head_count(n_head)
  433. logger.info(f"gguf: head count = {n_head}")
  434. if (n_head_kv := self.hparams.get("num_key_value_heads")) is not None:
  435. self.gguf_writer.add_head_count_kv(n_head_kv)
  436. logger.info(f"gguf: key-value head count = {n_head_kv}")
  437. if (rope_theta := self.hparams.get("rope_theta")) is not None:
  438. self.gguf_writer.add_rope_freq_base(rope_theta)
  439. logger.info(f"gguf: rope theta = {rope_theta}")
  440. if (f_rms_eps := self.hparams.get("rms_norm_eps")) is not None:
  441. self.gguf_writer.add_layer_norm_rms_eps(f_rms_eps)
  442. logger.info(f"gguf: rms norm epsilon = {f_rms_eps}")
  443. if (f_norm_eps := self.find_hparam(["layer_norm_eps", "layer_norm_epsilon", "norm_epsilon"], optional=True)) is not None:
  444. self.gguf_writer.add_layer_norm_eps(f_norm_eps)
  445. logger.info(f"gguf: layer norm epsilon = {f_norm_eps}")
  446. if (n_experts := self.hparams.get("num_local_experts")) is not None:
  447. self.gguf_writer.add_expert_count(n_experts)
  448. logger.info(f"gguf: expert count = {n_experts}")
  449. if (n_experts_used := self.hparams.get("num_experts_per_tok")) is not None:
  450. self.gguf_writer.add_expert_used_count(n_experts_used)
  451. logger.info(f"gguf: experts used count = {n_experts_used}")
  452. if (head_dim := self.hparams.get("head_dim")) is not None:
  453. self.gguf_writer.add_key_length(head_dim)
  454. self.gguf_writer.add_value_length(head_dim)
  455. self.gguf_writer.add_file_type(self.ftype)
  456. logger.info(f"gguf: file type = {self.ftype}")
  457. def write_vocab(self):
  458. if len(self.gguf_writer.tensors) != 1:
  459. raise ValueError('Splitting the vocabulary is not supported')
  460. self.prepare_metadata(vocab_only=True)
  461. self.gguf_writer.write_header_to_file(path=self.fname_out)
  462. self.gguf_writer.write_kv_data_to_file()
  463. self.gguf_writer.close()
  464. def does_token_look_special(self, token: str | bytes) -> bool:
  465. if isinstance(token, (bytes, bytearray)):
  466. token_text = token.decode(encoding="utf-8")
  467. elif isinstance(token, memoryview):
  468. token_text = token.tobytes().decode(encoding="utf-8")
  469. else:
  470. token_text = token
  471. # Some models mark some added tokens which ought to be control tokens as not special.
  472. # (e.g. command-r, command-r-plus, deepseek-coder, gemma{,-2})
  473. seems_special = token_text in (
  474. "<pad>", # deepseek-coder
  475. "<mask>", "<2mass>", "[@BOS@]", # gemma{,-2}
  476. )
  477. seems_special = seems_special or (token_text.startswith("<|") and token_text.endswith("|>"))
  478. seems_special = seems_special or (token_text.startswith("<|") and token_text.endswith("|>")) # deepseek-coder
  479. # TODO: should these be marked as UNUSED instead? (maybe not)
  480. seems_special = seems_special or (token_text.startswith("<unused") and token_text.endswith(">")) # gemma{,-2}
  481. return seems_special
  482. # used for GPT-2 BPE and WordPiece vocabs
  483. def get_vocab_base(self) -> tuple[list[str], list[int], str]:
  484. tokens: list[str] = []
  485. toktypes: list[int] = []
  486. from transformers import AutoTokenizer
  487. tokenizer = AutoTokenizer.from_pretrained(self.dir_model)
  488. vocab_size = self.hparams.get("vocab_size", len(tokenizer.vocab))
  489. assert max(tokenizer.vocab.values()) < vocab_size
  490. tokpre = self.get_vocab_base_pre(tokenizer)
  491. reverse_vocab = {id_: encoded_tok for encoded_tok, id_ in tokenizer.vocab.items()}
  492. added_vocab = tokenizer.get_added_vocab()
  493. added_tokens_decoder = tokenizer.added_tokens_decoder
  494. for i in range(vocab_size):
  495. if i not in reverse_vocab:
  496. tokens.append(f"[PAD{i}]")
  497. toktypes.append(gguf.TokenType.UNUSED)
  498. else:
  499. token: str = reverse_vocab[i]
  500. if token in added_vocab:
  501. # The tokenizer in llama.cpp assumes the CONTROL and USER_DEFINED tokens are pre-normalized.
  502. # To avoid unexpected issues - we make sure to normalize non-normalized tokens
  503. if not added_tokens_decoder[i].normalized:
  504. previous_token = token
  505. token = tokenizer.decode(tokenizer.encode(token, add_special_tokens=False))
  506. if previous_token != token:
  507. logger.info(f"{repr(previous_token)} is encoded and decoded back to {repr(token)} using AutoTokenizer")
  508. if added_tokens_decoder[i].special or self.does_token_look_special(token):
  509. toktypes.append(gguf.TokenType.CONTROL)
  510. else:
  511. # NOTE: this was added for Gemma.
  512. # Encoding and decoding the tokens above isn't sufficient for this case.
  513. token = token.replace(b"\xe2\x96\x81".decode("utf-8"), " ") # pre-normalize user-defined spaces
  514. toktypes.append(gguf.TokenType.USER_DEFINED)
  515. else:
  516. toktypes.append(gguf.TokenType.NORMAL)
  517. tokens.append(token)
  518. return tokens, toktypes, tokpre
  519. # NOTE: this function is generated by convert_hf_to_gguf_update.py
  520. # do not modify it manually!
  521. # ref: https://github.com/ggml-org/llama.cpp/pull/6920
  522. # Marker: Start get_vocab_base_pre
  523. def get_vocab_base_pre(self, tokenizer) -> str:
  524. # encoding this string and hashing the resulting tokens would (hopefully) give us a unique identifier that
  525. # is specific for the BPE pre-tokenizer used by the model
  526. # we will use this unique identifier to write a "tokenizer.ggml.pre" entry in the GGUF file which we can
  527. # use in llama.cpp to implement the same pre-tokenizer
  528. chktxt = '\n \n\n \n\n\n \t \t\t \t\n \n \n \n \n🚀 (normal) 😶\u200d🌫️ (multiple emojis concatenated) ✅ 🦙🦙 3 33 333 3333 33333 333333 3333333 33333333 3.3 3..3 3...3 កាន់តែពិសេសអាច😁 ?我想在apple工作1314151天~ ------======= нещо на Български \'\'\'\'\'\'```````""""......!!!!!!?????? I\'ve been \'told he\'s there, \'RE you sure? \'M not sure I\'ll make it, \'D you like some tea? We\'Ve a\'lL'
  529. chktok = tokenizer.encode(chktxt)
  530. chkhsh = sha256(str(chktok).encode()).hexdigest()
  531. logger.debug(f"chktok: {chktok}")
  532. logger.debug(f"chkhsh: {chkhsh}")
  533. res = None
  534. # NOTE: if you get an error here, you need to update the convert_hf_to_gguf_update.py script
  535. # or pull the latest version of the model from Huggingface
  536. # don't edit the hashes manually!
  537. if chkhsh == "0ef9807a4087ebef797fc749390439009c3b9eda9ad1a097abbe738f486c01e5":
  538. # ref: https://huggingface.co/meta-llama/Meta-Llama-3-8B
  539. res = "llama-bpe"
  540. if chkhsh == "049ecf7629871e3041641907f3de7c733e4dbfdc736f57d882ba0b0845599754":
  541. # ref: https://huggingface.co/deepseek-ai/deepseek-llm-7b-base
  542. res = "deepseek-llm"
  543. if chkhsh == "347715f544604f9118bb75ed199f68779f423cabb20db6de6f31b908d04d7821":
  544. # ref: https://huggingface.co/deepseek-ai/deepseek-coder-6.7b-base
  545. res = "deepseek-coder"
  546. if chkhsh == "8aeee3860c56296a157a1fe2fad249ec40aa59b1bb5709f4ade11c4e6fe652ed":
  547. # ref: https://huggingface.co/tiiuae/falcon-7b
  548. res = "falcon"
  549. if chkhsh == "9d032fcbd5501f4a38150912590928bfb36091efb5df11b8e2124b0390e3fb1e":
  550. # ref: https://huggingface.co/tiiuae/Falcon3-7B-Base
  551. res = "falcon3"
  552. if chkhsh == "0876d13b50744004aa9aeae05e7b0647eac9d801b5ba4668afc01e709c15e19f":
  553. # ref: https://huggingface.co/BAAI/bge-small-en-v1.5
  554. res = "bert-bge"
  555. if chkhsh == "8e62295832751ca1e8f92f2226f403dea30dc5165e448b5bfa05af5340c64ec7":
  556. # ref: https://huggingface.co/BAAI/bge-large-zh-v1.5
  557. res = "bert-bge-large"
  558. if chkhsh == "b6dc8df998e1cfbdc4eac8243701a65afe638679230920b50d6f17d81c098166":
  559. # ref: https://huggingface.co/mosaicml/mpt-7b
  560. res = "mpt"
  561. if chkhsh == "35d91631860c815f952d711435f48d356ebac988362536bed955d43bfa436e34":
  562. # ref: https://huggingface.co/bigcode/starcoder2-3b
  563. res = "starcoder"
  564. if chkhsh == "3ce83efda5659b07b1ad37ca97ca5797ea4285d9b9ab0dc679e4a720c9da7454":
  565. # ref: https://huggingface.co/openai-community/gpt2
  566. res = "gpt-2"
  567. if chkhsh == "32d85c31273f8019248f2559fed492d929ea28b17e51d81d3bb36fff23ca72b3":
  568. # ref: https://huggingface.co/stabilityai/stablelm-2-zephyr-1_6b
  569. res = "stablelm2"
  570. if chkhsh == "6221ad2852e85ce96f791f476e0b390cf9b474c9e3d1362f53a24a06dc8220ff":
  571. # ref: https://huggingface.co/smallcloudai/Refact-1_6-base
  572. res = "refact"
  573. if chkhsh == "9c2227e4dd922002fb81bde4fc02b0483ca4f12911410dee2255e4987644e3f8":
  574. # ref: https://huggingface.co/CohereForAI/c4ai-command-r-v01
  575. res = "command-r"
  576. if chkhsh == "e636dc30a262dcc0d8c323492e32ae2b70728f4df7dfe9737d9f920a282b8aea":
  577. # ref: https://huggingface.co/Qwen/Qwen1.5-7B
  578. res = "qwen2"
  579. if chkhsh == "b6dc8df998e1cfbdc4eac8243701a65afe638679230920b50d6f17d81c098166":
  580. # ref: https://huggingface.co/allenai/OLMo-1.7-7B-hf
  581. res = "olmo"
  582. if chkhsh == "a8594e3edff7c29c003940395316294b2c623e09894deebbc65f33f1515df79e":
  583. # ref: https://huggingface.co/databricks/dbrx-base
  584. res = "dbrx"
  585. if chkhsh == "c7699093ba4255a91e702aa38a596aa81669f3525dae06c2953267dde580f448":
  586. # ref: https://huggingface.co/jinaai/jina-reranker-v1-tiny-en
  587. res = "jina-v1-en"
  588. if chkhsh == "0876d13b50744004aa9aeae05e7b0647eac9d801b5ba4668afc01e709c15e19f":
  589. # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-en
  590. res = "jina-v2-en"
  591. if chkhsh == "171aeeedd6fb548d418a7461d053f11b6f1f1fc9b387bd66640d28a4b9f5c643":
  592. # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-es
  593. res = "jina-v2-es"
  594. if chkhsh == "27949a2493fc4a9f53f5b9b029c82689cfbe5d3a1929bb25e043089e28466de6":
  595. # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-de
  596. res = "jina-v2-de"
  597. if chkhsh == "c136ed14d01c2745d4f60a9596ae66800e2b61fa45643e72436041855ad4089d":
  598. # ref: https://huggingface.co/abacusai/Smaug-Llama-3-70B-Instruct
  599. res = "smaug-bpe"
  600. if chkhsh == "c7ea5862a53e4272c035c8238367063e2b270d51faa48c0f09e9d5b54746c360":
  601. # ref: https://huggingface.co/LumiOpen/Poro-34B-chat
  602. res = "poro-chat"
  603. if chkhsh == "7967bfa498ade6b757b064f31e964dddbb80f8f9a4d68d4ba7998fcf281c531a":
  604. # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-code
  605. res = "jina-v2-code"
  606. if chkhsh == "b6e8e1518dc4305be2fe39c313ed643381c4da5db34a98f6a04c093f8afbe99b" or chkhsh == "81d72c7348a9f0ebe86f23298d37debe0a5e71149e29bd283904c02262b27516":
  607. # ref: https://huggingface.co/THUDM/glm-4-9b-chat
  608. res = "chatglm-bpe"
  609. if chkhsh == "7fc505bd3104ca1083b150b17d088b59534ede9bde81f0dd2090967d7fe52cee":
  610. # ref: https://huggingface.co/LumiOpen/Viking-7B
  611. res = "viking"
  612. if chkhsh == "b53802fb28e26d645c3a310b34bfe07da813026ec7c7716883404d5e0f8b1901":
  613. # ref: https://huggingface.co/core42/jais-13b
  614. res = "jais"
  615. if chkhsh == "7b3e7548e4308f52a76e8229e4e6cc831195d0d1df43aed21ac6c93da05fec5f":
  616. # ref: https://huggingface.co/WisdomShell/CodeShell-7B
  617. res = "codeshell"
  618. if chkhsh == "63b97e4253352e6f357cc59ea5b583e3a680eaeaf2632188c2b952de2588485e":
  619. # ref: https://huggingface.co/mistralai/Mistral-Nemo-Base-2407
  620. res = "tekken"
  621. if chkhsh == "855059429035d75a914d1eda9f10a876752e281a054a7a3d421ef0533e5b6249":
  622. # ref: https://huggingface.co/HuggingFaceTB/SmolLM-135M
  623. res = "smollm"
  624. if chkhsh == "3c30d3ad1d6b64202cd222813e7736c2db6e1bd6d67197090fc1211fbc612ae7":
  625. # ref: https://huggingface.co/bigscience/bloom
  626. res = "bloom"
  627. if chkhsh == "bc01ce58980e1db43859146dc51b1758b3b88729b217a74792e9f8d43e479d21":
  628. # ref: https://huggingface.co/TurkuNLP/gpt3-finnish-small
  629. res = "gpt3-finnish"
  630. if chkhsh == "4e2b24cc4770243d65a2c9ec19770a72f08cffc161adbb73fcbb6b7dd45a0aae":
  631. # ref: https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct
  632. res = "exaone"
  633. if chkhsh == "fcace8b9cac38ce847670c970cd5892031a753a1ef381abd1d9af00f713da085":
  634. # ref: https://huggingface.co/microsoft/phi-2
  635. res = "phi-2"
  636. if chkhsh == "60824e3c0d9401f89943cbb2fff727f0e2d4c545ba4df2d6e4f09a6db0f5b450":
  637. # ref: https://huggingface.co/facebook/chameleon-7b
  638. res = "chameleon"
  639. if chkhsh == "1431a23e583c97432bc230bff598d103ddb5a1f89960c8f1d1051aaa944d0b35":
  640. # ref: https://huggingface.co/sapienzanlp/Minerva-7B-base-v1.0
  641. res = "minerva-7b"
  642. if chkhsh == "8b5a93ed704057481f240da0be7e7dca721d7f8f4755263b6807227a2cbeae65":
  643. # ref: https://huggingface.co/sentence-transformers/stsb-roberta-base
  644. res = "roberta-bpe"
  645. if chkhsh == "ad851be1dba641f2e3711822f816db2c265f788b37c63b4e1aeacb9ee92de8eb":
  646. # ref: https://huggingface.co/ai-sage/GigaChat-20B-A3B-instruct
  647. res = "gigachat"
  648. if chkhsh == "d4c8f286ea6b520b3d495c4455483cfa2302c0cfcd4be05d781b6a8a0a7cdaf1":
  649. # ref: https://huggingface.co/Infinigence/Megrez-3B-Instruct
  650. res = "megrez"
  651. if chkhsh == "877081d19cf6996e2c4ff0e1236341e9b7bde288f5311a56a937f0afbbb3aeb5":
  652. # ref: https://huggingface.co/deepseek-ai/DeepSeek-V3
  653. res = "deepseek-v3"
  654. if chkhsh == "b3f499bb4255f8ca19fccd664443283318f2fd2414d5e0b040fbdd0cc195d6c5":
  655. # ref: https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B
  656. res = "deepseek-r1-qwen"
  657. if chkhsh == "ccc2ef013c104be7bae2965776d611e1d7a8a2a9c547dd93a682c9a9fc80352e":
  658. # ref: https://huggingface.co/Xenova/gpt-4o
  659. res = "gpt-4o"
  660. if chkhsh == "7dec86086fcc38b66b7bc1575a160ae21cf705be7718b9d5598190d7c12db76f":
  661. # ref: https://huggingface.co/UW/OLMo2-8B-SuperBPE-t180k
  662. res = "superbpe"
  663. if chkhsh == "1994ffd01900cfb37395608534236ecd63f2bd5995d6cb1004dda1af50240f15":
  664. # ref: https://huggingface.co/trillionlabs/Trillion-7B-preview
  665. res = "trillion"
  666. if chkhsh == "96a5f08be6259352137b512d4157e333e21df7edd3fcd152990608735a65b224":
  667. # ref: https://huggingface.co/inclusionAI/Ling-lite
  668. res = "bailingmoe"
  669. if chkhsh == "d353350c764d8c3b39c763113960e4fb4919bea5fbf208a0e3b22e8469dc7406":
  670. # ref: https://huggingface.co/meta-llama/Llama-4-Scout-17B-16E-Instruct
  671. res = "llama4"
  672. if chkhsh == "a1336059768a55c99a734006ffb02203cd450fed003e9a71886c88acf24fdbc2":
  673. # ref: https://huggingface.co/THUDM/glm-4-9b-hf
  674. res = "glm4"
  675. if chkhsh == "0e9433cbbb161f89e264eb32e8e64bfe69e834973ffca5d41d3948a604a3e2a3":
  676. # ref: https://huggingface.co/mistral-community/pixtral-12b
  677. res = "pixtral"
  678. if chkhsh == "d5f1dd6f980fec569fb218a81a7658ac45fc56b38c5a0adeb1c232fbe04ef5ec":
  679. # ref: https://huggingface.co/ByteDance-Seed/Seed-Coder-8B-Base
  680. res = "seed-coder"
  681. if res is None:
  682. logger.warning("\n")
  683. logger.warning("**************************************************************************************")
  684. logger.warning("** WARNING: The BPE pre-tokenizer was not recognized!")
  685. logger.warning("** There are 2 possible reasons for this:")
  686. logger.warning("** - the model has not been added to convert_hf_to_gguf_update.py yet")
  687. logger.warning("** - the pre-tokenization config has changed upstream")
  688. logger.warning("** Check your model files and convert_hf_to_gguf_update.py and update them accordingly.")
  689. logger.warning("** ref: https://github.com/ggml-org/llama.cpp/pull/6920")
  690. logger.warning("**")
  691. logger.warning(f"** chkhsh: {chkhsh}")
  692. logger.warning("**************************************************************************************")
  693. logger.warning("\n")
  694. raise NotImplementedError("BPE pre-tokenizer was not recognized - update get_vocab_base_pre()")
  695. logger.debug(f"tokenizer.ggml.pre: {repr(res)}")
  696. logger.debug(f"chkhsh: {chkhsh}")
  697. return res
  698. # Marker: End get_vocab_base_pre
  699. def _set_vocab_none(self) -> None:
  700. self.gguf_writer.add_tokenizer_model("none")
  701. def _set_vocab_gpt2(self) -> None:
  702. tokens, toktypes, tokpre = self.get_vocab_base()
  703. self.gguf_writer.add_tokenizer_model("gpt2")
  704. self.gguf_writer.add_tokenizer_pre(tokpre)
  705. self.gguf_writer.add_token_list(tokens)
  706. self.gguf_writer.add_token_types(toktypes)
  707. special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=True)
  708. special_vocab.add_to_gguf(self.gguf_writer)
  709. def _set_vocab_qwen(self):
  710. dir_model = self.dir_model
  711. hparams = self.hparams
  712. tokens: list[str] = []
  713. toktypes: list[int] = []
  714. from transformers import AutoTokenizer
  715. tokenizer = AutoTokenizer.from_pretrained(dir_model, trust_remote_code=True)
  716. vocab_size = hparams["vocab_size"]
  717. assert max(tokenizer.get_vocab().values()) < vocab_size
  718. tokpre = self.get_vocab_base_pre(tokenizer)
  719. merges = []
  720. vocab = {}
  721. mergeable_ranks = tokenizer.mergeable_ranks
  722. for token, rank in mergeable_ranks.items():
  723. vocab[QwenModel.token_bytes_to_string(token)] = rank
  724. if len(token) == 1:
  725. continue
  726. merged = QwenModel.bpe(mergeable_ranks, token, max_rank=rank)
  727. assert len(merged) == 2
  728. merges.append(' '.join(map(QwenModel.token_bytes_to_string, merged)))
  729. # for this kind of tokenizer, added_vocab is not a subset of vocab, so they need to be combined
  730. added_vocab = tokenizer.special_tokens
  731. reverse_vocab = {id_ : encoded_tok for encoded_tok, id_ in {**vocab, **added_vocab}.items()}
  732. for i in range(vocab_size):
  733. if i not in reverse_vocab:
  734. tokens.append(f"[PAD{i}]")
  735. toktypes.append(gguf.TokenType.UNUSED)
  736. elif reverse_vocab[i] in added_vocab:
  737. tokens.append(reverse_vocab[i])
  738. toktypes.append(gguf.TokenType.CONTROL)
  739. else:
  740. tokens.append(reverse_vocab[i])
  741. toktypes.append(gguf.TokenType.NORMAL)
  742. self.gguf_writer.add_tokenizer_model("gpt2")
  743. self.gguf_writer.add_tokenizer_pre(tokpre)
  744. self.gguf_writer.add_token_list(tokens)
  745. self.gguf_writer.add_token_types(toktypes)
  746. special_vocab = gguf.SpecialVocab(dir_model, load_merges=False)
  747. special_vocab.merges = merges
  748. # only add special tokens when they were not already loaded from config.json
  749. if len(special_vocab.special_token_ids) == 0:
  750. special_vocab._set_special_token("bos", tokenizer.special_tokens["<|endoftext|>"])
  751. special_vocab._set_special_token("eos", tokenizer.special_tokens["<|endoftext|>"])
  752. # this one is usually not in config.json anyway
  753. special_vocab._set_special_token("unk", tokenizer.special_tokens["<|endoftext|>"])
  754. special_vocab.add_to_gguf(self.gguf_writer)
  755. def _set_vocab_sentencepiece(self, add_to_gguf=True):
  756. tokens, scores, toktypes = self._create_vocab_sentencepiece()
  757. self.gguf_writer.add_tokenizer_model("llama")
  758. self.gguf_writer.add_tokenizer_pre("default")
  759. self.gguf_writer.add_token_list(tokens)
  760. self.gguf_writer.add_token_scores(scores)
  761. self.gguf_writer.add_token_types(toktypes)
  762. special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
  763. special_vocab.add_to_gguf(self.gguf_writer)
  764. def _create_vocab_sentencepiece(self):
  765. from sentencepiece import SentencePieceProcessor
  766. tokenizer_path = self.dir_model / 'tokenizer.model'
  767. if not tokenizer_path.is_file():
  768. raise FileNotFoundError(f"File not found: {tokenizer_path}")
  769. tokenizer = SentencePieceProcessor()
  770. tokenizer.LoadFromFile(str(tokenizer_path))
  771. vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())
  772. tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
  773. scores: list[float] = [-10000.0] * vocab_size
  774. toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size
  775. for token_id in range(tokenizer.vocab_size()):
  776. piece = tokenizer.IdToPiece(token_id)
  777. text = piece.encode("utf-8")
  778. score = tokenizer.GetScore(token_id)
  779. toktype = SentencePieceTokenTypes.NORMAL
  780. if tokenizer.IsUnknown(token_id):
  781. toktype = SentencePieceTokenTypes.UNKNOWN
  782. elif tokenizer.IsControl(token_id):
  783. toktype = SentencePieceTokenTypes.CONTROL
  784. elif tokenizer.IsUnused(token_id):
  785. toktype = SentencePieceTokenTypes.UNUSED
  786. elif tokenizer.IsByte(token_id):
  787. toktype = SentencePieceTokenTypes.BYTE
  788. tokens[token_id] = text
  789. scores[token_id] = score
  790. toktypes[token_id] = toktype
  791. added_tokens_file = self.dir_model / 'added_tokens.json'
  792. if added_tokens_file.is_file():
  793. with open(added_tokens_file, "r", encoding="utf-8") as f:
  794. added_tokens_json = json.load(f)
  795. for key in added_tokens_json:
  796. token_id = added_tokens_json[key]
  797. if token_id >= vocab_size:
  798. logger.warning(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
  799. continue
  800. tokens[token_id] = key.encode("utf-8")
  801. scores[token_id] = -1000.0
  802. toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
  803. tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
  804. if tokenizer_config_file.is_file():
  805. with open(tokenizer_config_file, "r", encoding="utf-8") as f:
  806. tokenizer_config_json = json.load(f)
  807. added_tokens_decoder = tokenizer_config_json.get("added_tokens_decoder", {})
  808. for token_id, token_data in added_tokens_decoder.items():
  809. token_id = int(token_id)
  810. token: str = token_data["content"]
  811. if token_id >= vocab_size:
  812. logger.warning(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
  813. continue
  814. if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
  815. if tokens[token_id] != token.encode("utf-8"):
  816. logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token!r}')
  817. if token_data.get("special") or self.does_token_look_special(token):
  818. toktypes[token_id] = SentencePieceTokenTypes.CONTROL
  819. else:
  820. token = token.replace(b"\xe2\x96\x81".decode("utf-8"), " ") # pre-normalize user-defined spaces
  821. toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
  822. scores[token_id] = -1000.0
  823. tokens[token_id] = token.encode("utf-8")
  824. if vocab_size > len(tokens):
  825. pad_count = vocab_size - len(tokens)
  826. logger.debug(f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]")
  827. for i in range(1, pad_count + 1):
  828. tokens.append(bytes(f"[PAD{i}]", encoding="utf-8"))
  829. scores.append(-1000.0)
  830. toktypes.append(SentencePieceTokenTypes.UNUSED)
  831. return tokens, scores, toktypes
  832. def _set_vocab_llama_hf(self):
  833. vocab = gguf.LlamaHfVocab(self.dir_model)
  834. tokens = []
  835. scores = []
  836. toktypes = []
  837. for text, score, toktype in vocab.all_tokens():
  838. tokens.append(text)
  839. scores.append(score)
  840. toktypes.append(toktype)
  841. assert len(tokens) == vocab.vocab_size
  842. self.gguf_writer.add_tokenizer_model("llama")
  843. self.gguf_writer.add_tokenizer_pre("default")
  844. self.gguf_writer.add_token_list(tokens)
  845. self.gguf_writer.add_token_scores(scores)
  846. self.gguf_writer.add_token_types(toktypes)
  847. special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
  848. special_vocab.add_to_gguf(self.gguf_writer)
  849. def _set_vocab_rwkv_world(self):
  850. assert (self.dir_model / "rwkv_vocab_v20230424.txt").is_file()
  851. vocab_size = self.hparams.get("vocab_size", 65536)
  852. tokens: list[bytes] = ['<s>'.encode("utf-8")]
  853. toktypes: list[int] = [gguf.TokenType.CONTROL]
  854. with open(self.dir_model / "rwkv_vocab_v20230424.txt", "r", encoding="utf-8") as f:
  855. lines = f.readlines()
  856. for line in lines:
  857. parts = line.split(' ')
  858. assert len(parts) >= 3
  859. token, token_len = ast.literal_eval(' '.join(parts[1:-1])), int(parts[-1])
  860. token = token.encode("utf-8") if isinstance(token, str) else token
  861. assert isinstance(token, bytes)
  862. assert len(token) == token_len
  863. token_text: str = repr(token)[2:-1] # "b'\xff'" -> "\xff"
  864. tokens.append(token_text.encode("utf-8"))
  865. toktypes.append(gguf.TokenType.NORMAL)
  866. remainder = vocab_size - len(tokens)
  867. assert remainder >= 0
  868. for i in range(len(tokens), vocab_size):
  869. tokens.append(f"[PAD{i}]".encode("utf-8"))
  870. toktypes.append(gguf.TokenType.UNUSED)
  871. self.gguf_writer.add_tokenizer_model("rwkv")
  872. self.gguf_writer.add_token_list(tokens)
  873. self.gguf_writer.add_token_types(toktypes)
  874. special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=False)
  875. special_vocab.chat_template = "rwkv-world"
  876. # hack: Add '\n\n' as the EOT token to make it chat normally
  877. special_vocab._set_special_token("eot", 261)
  878. special_vocab.add_to_gguf(self.gguf_writer)
  879. def _set_vocab_builtin(self, model_name: Literal["gpt-neox", "llama-spm"], vocab_size: int):
  880. tokenizer_path = Path(sys.path[0]) / "models" / f"ggml-vocab-{model_name}.gguf"
  881. logger.warning(f"Using tokenizer from '{os.path.relpath(tokenizer_path, os.getcwd())}'")
  882. vocab_reader = gguf.GGUFReader(tokenizer_path, "r")
  883. default_pre = "mpt" if model_name == "gpt-neox" else "default"
  884. field = vocab_reader.get_field(gguf.Keys.Tokenizer.MODEL)
  885. assert field # tokenizer model
  886. self.gguf_writer.add_tokenizer_model(bytes(field.parts[-1]).decode("utf-8"))
  887. field = vocab_reader.get_field(gguf.Keys.Tokenizer.PRE)
  888. self.gguf_writer.add_tokenizer_pre(bytes(field.parts[-1]).decode("utf-8") if field else default_pre)
  889. field = vocab_reader.get_field(gguf.Keys.Tokenizer.LIST)
  890. assert field # token list
  891. self.gguf_writer.add_token_list([bytes(field.parts[i]) for i in field.data][:vocab_size])
  892. if model_name == "llama-spm":
  893. field = vocab_reader.get_field(gguf.Keys.Tokenizer.SCORES)
  894. assert field # token scores
  895. self.gguf_writer.add_token_scores([field.parts[i].tolist()[0] for i in field.data][:vocab_size])
  896. field = vocab_reader.get_field(gguf.Keys.Tokenizer.TOKEN_TYPE)
  897. assert field # token types
  898. self.gguf_writer.add_token_types([field.parts[i].tolist()[0] for i in field.data][:vocab_size])
  899. if model_name != "llama-spm":
  900. field = vocab_reader.get_field(gguf.Keys.Tokenizer.MERGES)
  901. assert field # token merges
  902. self.gguf_writer.add_token_merges([bytes(field.parts[i]) for i in field.data])
  903. if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.BOS_ID)) is not None:
  904. self.gguf_writer.add_bos_token_id(field.parts[-1].tolist()[0])
  905. if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.EOS_ID)) is not None:
  906. self.gguf_writer.add_eos_token_id(field.parts[-1].tolist()[0])
  907. if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.UNK_ID)) is not None:
  908. self.gguf_writer.add_unk_token_id(field.parts[-1].tolist()[0])
  909. if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.PAD_ID)) is not None:
  910. self.gguf_writer.add_pad_token_id(field.parts[-1].tolist()[0])
  911. if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.ADD_BOS)) is not None:
  912. self.gguf_writer.add_add_bos_token(field.parts[-1].tolist()[0])
  913. if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.ADD_EOS)) is not None:
  914. self.gguf_writer.add_add_eos_token(field.parts[-1].tolist()[0])
  915. def _try_set_pooling_type(self) -> None:
  916. # get pooling path
  917. pooling_path = None
  918. module_path = self.dir_model / "modules.json"
  919. if module_path.is_file():
  920. with open(module_path, encoding="utf-8") as f:
  921. modules = json.load(f)
  922. for mod in modules:
  923. if mod["type"] == "sentence_transformers.models.Pooling":
  924. pooling_path = mod["path"]
  925. break
  926. # get pooling type
  927. if pooling_path is not None:
  928. with open(self.dir_model / pooling_path / "config.json", encoding="utf-8") as f:
  929. pooling = json.load(f)
  930. if pooling["pooling_mode_mean_tokens"]:
  931. pooling_type = gguf.PoolingType.MEAN
  932. elif pooling["pooling_mode_cls_token"]:
  933. pooling_type = gguf.PoolingType.CLS
  934. elif pooling["pooling_mode_lasttoken"]:
  935. pooling_type = gguf.PoolingType.LAST
  936. else:
  937. raise NotImplementedError("Only MEAN, CLS, and LAST pooling types supported")
  938. self.gguf_writer.add_pooling_type(pooling_type)
  939. class VisionModel(ModelBase):
  940. model_type = ModelType.VISION
  941. model_arch = gguf.MODEL_ARCH.CLIP_VISION
  942. preprocessor_config: dict[str, Any]
  943. global_config: dict[str, Any]
  944. def __init__(self, *args, **kwargs):
  945. super().__init__(*args, **kwargs)
  946. if self.model_arch != gguf.MODEL_ARCH.CLIP_VISION:
  947. raise TypeError("VisionModel must be subclassed with model_arch = gguf.MODEL_ARCH.CLIP_VISION")
  948. # get n_embd of the text model
  949. if "text_config" not in self.hparams:
  950. self.hparams["text_config"] = {}
  951. text_config = {**self.hparams, **self.hparams["text_config"]}
  952. self.n_embd_text = text_config.get("hidden_size", text_config.get("n_embd", 0))
  953. assert self.n_embd_text > 0, "n_embd not found in hparams"
  954. if "vision_config" not in self.hparams:
  955. raise ValueError("vision_config not found in hparams")
  956. # move vision config to the top level, while preserving the original hparams in global_config
  957. self.global_config = self.hparams
  958. self.hparams = self.hparams["vision_config"]
  959. self.block_count = self.find_hparam(["n_layers", "num_hidden_layers", "n_layer", "num_layers", "depth"])
  960. self.tensor_map = gguf.get_tensor_name_map(gguf.MODEL_ARCH.CLIP_VISION, self.block_count)
  961. # load preprocessor config
  962. with open(self.dir_model / "preprocessor_config.json", "r", encoding="utf-8") as f:
  963. self.preprocessor_config = json.load(f)
  964. def set_type(self):
  965. self.gguf_writer.add_type(gguf.GGUFType.CLIP_VISION)
  966. def set_gguf_parameters(self):
  967. self.gguf_writer.add_file_type(self.ftype)
  968. self.gguf_writer.add_vision_projection_dim(self.n_embd_text)
  969. self.gguf_writer.add_vision_has_vision_encoder(True)
  970. # vision config
  971. self.gguf_writer.add_vision_image_size(self.find_hparam(["image_size"]))
  972. self.gguf_writer.add_vision_patch_size(self.find_hparam(["patch_size"]))
  973. self.gguf_writer.add_vision_embedding_length(self.find_hparam(["hidden_size"]))
  974. self.gguf_writer.add_vision_feed_forward_length(self.find_hparam(["intermediate_size"]))
  975. self.gguf_writer.add_vision_block_count(self.block_count)
  976. self.gguf_writer.add_vision_head_count(self.find_hparam(["num_attention_heads"]))
  977. # preprocessor config
  978. self.gguf_writer.add_vision_image_mean(self.preprocessor_config["image_mean"])
  979. self.gguf_writer.add_vision_image_std(self.preprocessor_config["image_std"])
  980. def write_vocab(self):
  981. raise ValueError("VisionModel does not support vocab writing")
  982. @ModelBase.register("GPTNeoXForCausalLM")
  983. class GPTNeoXModel(TextModel):
  984. model_arch = gguf.MODEL_ARCH.GPTNEOX
  985. def set_gguf_parameters(self):
  986. block_count = self.hparams["num_hidden_layers"]
  987. self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"])
  988. self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
  989. self.gguf_writer.add_block_count(block_count)
  990. self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
  991. self.gguf_writer.add_rope_dimension_count(
  992. int(self.hparams["rotary_pct"] * (self.hparams["hidden_size"] // self.hparams["num_attention_heads"])),
  993. )
  994. self.gguf_writer.add_head_count(self.hparams["num_attention_heads"])
  995. self.gguf_writer.add_parallel_residual(self.hparams.get("use_parallel_residual", True))
  996. self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_eps"])
  997. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  998. del bid # unused
  999. n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads"))
  1000. n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed"))
  1001. tensors: list[tuple[str, Tensor]] = []
  1002. if re.match(r"gpt_neox\.layers\.\d+\.attention\.query_key_value\.weight", name):
  1003. # Map bloom-style qkv_linear to gpt-style qkv_linear
  1004. # bloom: https://github.com/huggingface/transformers/blob/main/src/transformers/models/bloom/modeling_bloom.py#L238-L252 # noqa
  1005. # gpt-2: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt2/modeling_gpt2.py#L312 # noqa
  1006. qkv_weights = data_torch.reshape((n_head, 3, n_embed // n_head, n_embed))
  1007. data_torch = torch.cat(
  1008. (
  1009. qkv_weights[:, 0, :, :].reshape((-1, n_embed)),
  1010. qkv_weights[:, 1, :, :].reshape((-1, n_embed)),
  1011. qkv_weights[:, 2, :, :].reshape((-1, n_embed)),
  1012. ),
  1013. dim=0,
  1014. )
  1015. logger.info("re-format attention.linear_qkv.weight")
  1016. elif re.match(r"gpt_neox\.layers\.\d+\.attention\.query_key_value\.bias", name):
  1017. qkv_bias = data_torch.reshape((n_head, 3, n_embed // n_head))
  1018. data_torch = torch.cat(
  1019. (
  1020. qkv_bias[:, 0, :].reshape((n_embed,)),
  1021. qkv_bias[:, 1, :].reshape((n_embed,)),
  1022. qkv_bias[:, 2, :].reshape((n_embed,)),
  1023. ),
  1024. dim=0,
  1025. )
  1026. logger.info("re-format attention.linear_qkv.bias")
  1027. tensors.append((self.map_tensor_name(name), data_torch))
  1028. return tensors
  1029. @ModelBase.register("BloomForCausalLM", "BloomModel")
  1030. class BloomModel(TextModel):
  1031. model_arch = gguf.MODEL_ARCH.BLOOM
  1032. def set_gguf_parameters(self):
  1033. n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed"))
  1034. n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads"))
  1035. self.gguf_writer.add_context_length(self.hparams.get("seq_length", n_embed))
  1036. self.gguf_writer.add_embedding_length(n_embed)
  1037. self.gguf_writer.add_feed_forward_length(4 * n_embed)
  1038. self.gguf_writer.add_block_count(self.hparams["n_layer"])
  1039. self.gguf_writer.add_head_count(n_head)
  1040. self.gguf_writer.add_head_count_kv(n_head)
  1041. self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
  1042. self.gguf_writer.add_file_type(self.ftype)
  1043. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  1044. del bid # unused
  1045. n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads"))
  1046. n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed"))
  1047. name = re.sub(r'transformer\.', '', name)
  1048. tensors: list[tuple[str, Tensor]] = []
  1049. if re.match(r"h\.\d+\.self_attention\.query_key_value\.weight", name):
  1050. # Map bloom-style qkv_linear to gpt-style qkv_linear
  1051. # bloom: https://github.com/huggingface/transformers/blob/main/src/transformers/models/bloom/modeling_bloom.py#L238-L252 # noqa
  1052. # gpt-2: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt2/modeling_gpt2.py#L312 # noqa
  1053. qkv_weights = data_torch.reshape((n_head, 3, n_embed // n_head, n_embed))
  1054. data_torch = torch.cat(
  1055. (
  1056. qkv_weights[:, 0, :, :].reshape((-1, n_embed)),
  1057. qkv_weights[:, 1, :, :].reshape((-1, n_embed)),
  1058. qkv_weights[:, 2, :, :].reshape((-1, n_embed)),
  1059. ),
  1060. dim=0,
  1061. )
  1062. logger.info("re-format attention.linear_qkv.weight")
  1063. elif re.match(r"h\.\d+\.self_attention\.query_key_value\.bias", name):
  1064. qkv_bias = data_torch.reshape((n_head, 3, n_embed // n_head))
  1065. data_torch = torch.cat(
  1066. (
  1067. qkv_bias[:, 0, :].reshape((n_embed,)),
  1068. qkv_bias[:, 1, :].reshape((n_embed,)),
  1069. qkv_bias[:, 2, :].reshape((n_embed,)),
  1070. ),
  1071. dim=0,
  1072. )
  1073. logger.info("re-format attention.linear_qkv.bias")
  1074. tensors.append((self.map_tensor_name(name), data_torch))
  1075. return tensors
  1076. @ModelBase.register("MPTForCausalLM")
  1077. class MPTModel(TextModel):
  1078. model_arch = gguf.MODEL_ARCH.MPT
  1079. def set_vocab(self):
  1080. try:
  1081. self._set_vocab_gpt2()
  1082. except Exception:
  1083. # Fallback for SEA-LION model
  1084. self._set_vocab_sentencepiece()
  1085. self.gguf_writer.add_add_bos_token(False)
  1086. self.gguf_writer.add_pad_token_id(3)
  1087. self.gguf_writer.add_eos_token_id(1)
  1088. self.gguf_writer.add_unk_token_id(0)
  1089. def set_gguf_parameters(self):
  1090. block_count = self.hparams["n_layers"]
  1091. self.gguf_writer.add_context_length(self.hparams["max_seq_len"])
  1092. self.gguf_writer.add_embedding_length(self.hparams["d_model"])
  1093. self.gguf_writer.add_block_count(block_count)
  1094. self.gguf_writer.add_feed_forward_length(4 * self.hparams["d_model"])
  1095. self.gguf_writer.add_head_count(self.hparams["n_heads"])
  1096. if kv_n_heads := self.hparams["attn_config"].get("kv_n_heads"):
  1097. self.gguf_writer.add_head_count_kv(kv_n_heads)
  1098. self.gguf_writer.add_layer_norm_eps(1e-5)
  1099. if self.hparams["attn_config"]["clip_qkv"] is not None:
  1100. self.gguf_writer.add_clamp_kqv(self.hparams["attn_config"]["clip_qkv"])
  1101. if self.hparams["attn_config"]["alibi"]:
  1102. self.gguf_writer.add_max_alibi_bias(self.hparams["attn_config"]["alibi_bias_max"])
  1103. else:
  1104. self.gguf_writer.add_max_alibi_bias(0.0)
  1105. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  1106. del bid # unused
  1107. if "scales" in name:
  1108. new_name = self.map_tensor_name(name, try_suffixes=(".weight", ".bias", ".scales"))
  1109. new_name = new_name.replace("scales", "act.scales")
  1110. else:
  1111. new_name = self.map_tensor_name(name, try_suffixes=(".weight", ".bias"))
  1112. return [(new_name, data_torch)]
  1113. @ModelBase.register("OrionForCausalLM")
  1114. class OrionModel(TextModel):
  1115. model_arch = gguf.MODEL_ARCH.ORION
  1116. def set_vocab(self):
  1117. self._set_vocab_sentencepiece()
  1118. def set_gguf_parameters(self):
  1119. block_count = self.hparams["num_hidden_layers"]
  1120. head_count = self.hparams["num_attention_heads"]
  1121. head_count_kv = self.hparams.get("num_key_value_heads", head_count)
  1122. ctx_length = 0
  1123. if "max_sequence_length" in self.hparams:
  1124. ctx_length = self.hparams["max_sequence_length"]
  1125. elif "max_position_embeddings" in self.hparams:
  1126. ctx_length = self.hparams["max_position_embeddings"]
  1127. elif "model_max_length" in self.hparams:
  1128. ctx_length = self.hparams["model_max_length"]
  1129. else:
  1130. raise ValueError("gguf: can not find ctx length parameter.")
  1131. self.gguf_writer.add_file_type(self.ftype)
  1132. self.gguf_writer.add_tensor_data_layout("Meta AI original pth")
  1133. self.gguf_writer.add_context_length(ctx_length)
  1134. self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
  1135. self.gguf_writer.add_block_count(block_count)
  1136. self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
  1137. self.gguf_writer.add_head_count(head_count)
  1138. self.gguf_writer.add_head_count_kv(head_count_kv)
  1139. # note: config provides rms norm but it is actually layer norm
  1140. # ref: https://huggingface.co/OrionStarAI/Orion-14B-Chat/blob/276a17221ce42beb45f66fac657a41540e71f4f5/modeling_orion.py#L570-L571
  1141. self.gguf_writer.add_layer_norm_eps(self.hparams["rms_norm_eps"])
  1142. @ModelBase.register("BaichuanForCausalLM", "BaiChuanForCausalLM")
  1143. class BaichuanModel(TextModel):
  1144. model_arch = gguf.MODEL_ARCH.BAICHUAN
  1145. def set_vocab(self):
  1146. self._set_vocab_sentencepiece()
  1147. def set_gguf_parameters(self):
  1148. block_count = self.hparams["num_hidden_layers"]
  1149. head_count = self.hparams["num_attention_heads"]
  1150. head_count_kv = self.hparams.get("num_key_value_heads", head_count)
  1151. ctx_length = 0
  1152. if "max_sequence_length" in self.hparams:
  1153. ctx_length = self.hparams["max_sequence_length"]
  1154. elif "max_position_embeddings" in self.hparams:
  1155. ctx_length = self.hparams["max_position_embeddings"]
  1156. elif "model_max_length" in self.hparams:
  1157. ctx_length = self.hparams["model_max_length"]
  1158. else:
  1159. raise ValueError("gguf: can not find ctx length parameter.")
  1160. self.gguf_writer.add_tensor_data_layout("Meta AI original pth")
  1161. self.gguf_writer.add_context_length(ctx_length)
  1162. self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
  1163. self.gguf_writer.add_block_count(block_count)
  1164. self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
  1165. self.gguf_writer.add_rope_dimension_count(self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
  1166. self.gguf_writer.add_head_count(head_count)
  1167. self.gguf_writer.add_head_count_kv(head_count_kv)
  1168. self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
  1169. self.gguf_writer.add_file_type(self.ftype)
  1170. rope_scaling = self.hparams.get("rope_scaling") or {}
  1171. if rope_scaling.get("rope_type", rope_scaling.get("type")) == "linear" and "factor" in rope_scaling:
  1172. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
  1173. self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
  1174. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  1175. head_count = self.hparams["num_attention_heads"]
  1176. head_count_kv = self.hparams.get("num_key_value_heads", head_count)
  1177. tensors: list[tuple[str, Tensor]] = []
  1178. if bid is not None and name == f"model.layers.{bid}.self_attn.W_pack.weight":
  1179. logger.info(f"Unpacking and permuting layer {bid}")
  1180. tensors = [
  1181. (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid),
  1182. self._reverse_hf_permute_part(data_torch, 0, head_count, head_count)),
  1183. (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid),
  1184. self._reverse_hf_permute_part(data_torch, 1, head_count, head_count_kv)),
  1185. (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid),
  1186. self._reverse_hf_part(data_torch, 2)),
  1187. ]
  1188. else:
  1189. tensors = [(self.map_tensor_name(name), data_torch)]
  1190. return tensors
  1191. def _reverse_hf_permute(self, weights: Tensor, n_head: int, n_kv_head: int | None = None) -> Tensor:
  1192. if n_kv_head is not None and n_head != n_kv_head:
  1193. n_head //= n_kv_head
  1194. return (
  1195. weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
  1196. .swapaxes(1, 2)
  1197. .reshape(weights.shape)
  1198. )
  1199. def _reverse_hf_permute_part(
  1200. self, weights: Tensor, n_part: int, n_head: int, n_head_kv: int | None = None,
  1201. ) -> Tensor:
  1202. r = weights.shape[0] // 3
  1203. return self._reverse_hf_permute(weights[r * n_part:r * n_part + r, ...], n_head, n_head_kv)
  1204. def _reverse_hf_part(self, weights: Tensor, n_part: int) -> Tensor:
  1205. r = weights.shape[0] // 3
  1206. return weights[r * n_part:r * n_part + r, ...]
  1207. @ModelBase.register("XverseForCausalLM")
  1208. class XverseModel(TextModel):
  1209. model_arch = gguf.MODEL_ARCH.XVERSE
  1210. def set_vocab(self):
  1211. assert (self.dir_model / "tokenizer.json").is_file()
  1212. dir_model = self.dir_model
  1213. hparams = self.hparams
  1214. tokens: list[bytes] = []
  1215. toktypes: list[int] = []
  1216. from transformers import AutoTokenizer
  1217. tokenizer = AutoTokenizer.from_pretrained(dir_model)
  1218. vocab_size = hparams.get("vocab_size", len(tokenizer.vocab))
  1219. # Since we are checking the maximum index, we need to ensure it's strictly less than vocab_size,
  1220. # because vocab_size is the count of items, and indexes start at 0.
  1221. max_vocab_index = max(tokenizer.get_vocab().values())
  1222. if max_vocab_index >= vocab_size:
  1223. raise ValueError("Vocabulary size exceeds expected maximum size.")
  1224. reverse_vocab: dict[int, str] = {id_: encoded_tok for encoded_tok, id_ in tokenizer.vocab.items()}
  1225. added_vocab = tokenizer.get_added_vocab()
  1226. for token_id in range(vocab_size):
  1227. token_text = reverse_vocab[token_id].encode('utf-8')
  1228. # replace "\x00" to string with length > 0
  1229. if token_text == b"\x00":
  1230. toktype = gguf.TokenType.BYTE # special
  1231. token_text = f"<{token_text}>".encode('utf-8')
  1232. elif re.fullmatch(br"<0x[0-9A-Fa-f]{2}>", token_text):
  1233. toktype = gguf.TokenType.BYTE # special
  1234. elif reverse_vocab[token_id] in added_vocab:
  1235. if tokenizer.added_tokens_decoder[token_id].special:
  1236. toktype = gguf.TokenType.CONTROL
  1237. else:
  1238. toktype = gguf.TokenType.USER_DEFINED
  1239. else:
  1240. toktype = gguf.TokenType.NORMAL
  1241. tokens.append(token_text)
  1242. toktypes.append(toktype)
  1243. self.gguf_writer.add_tokenizer_model("llama")
  1244. self.gguf_writer.add_tokenizer_pre("default")
  1245. self.gguf_writer.add_token_list(tokens)
  1246. self.gguf_writer.add_token_types(toktypes)
  1247. special_vocab = gguf.SpecialVocab(dir_model, n_vocab=len(tokens))
  1248. special_vocab.add_to_gguf(self.gguf_writer)
  1249. def set_gguf_parameters(self):
  1250. block_count = self.hparams["num_hidden_layers"]
  1251. head_count = self.hparams["num_attention_heads"]
  1252. head_count_kv = self.hparams.get("num_key_value_heads", head_count)
  1253. ctx_length = 0
  1254. if "max_sequence_length" in self.hparams:
  1255. ctx_length = self.hparams["max_sequence_length"]
  1256. elif "max_position_embeddings" in self.hparams:
  1257. ctx_length = self.hparams["max_position_embeddings"]
  1258. elif "model_max_length" in self.hparams:
  1259. ctx_length = self.hparams["model_max_length"]
  1260. else:
  1261. raise ValueError("gguf: can not find ctx length parameter.")
  1262. self.gguf_writer.add_tensor_data_layout("Meta AI original pth")
  1263. self.gguf_writer.add_context_length(ctx_length)
  1264. self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
  1265. self.gguf_writer.add_block_count(block_count)
  1266. self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
  1267. self.gguf_writer.add_rope_dimension_count(self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
  1268. self.gguf_writer.add_head_count(head_count)
  1269. self.gguf_writer.add_head_count_kv(head_count_kv)
  1270. self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
  1271. self.gguf_writer.add_file_type(self.ftype)
  1272. rope_scaling = self.hparams.get("rope_scaling") or {}
  1273. if rope_scaling.get("rope_type", rope_scaling.get("type")) == "linear" and "factor" in rope_scaling:
  1274. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
  1275. self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
  1276. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  1277. del bid # unused
  1278. head_count = self.hparams["num_attention_heads"]
  1279. head_count_kv = self.hparams.get("num_key_value_heads", head_count)
  1280. # HF models permute some of the tensors, so we need to undo that
  1281. if name.endswith("q_proj.weight"):
  1282. data_torch = self._reverse_hf_permute(data_torch, head_count, head_count)
  1283. if name.endswith("k_proj.weight"):
  1284. data_torch = self._reverse_hf_permute(data_torch, head_count, head_count_kv)
  1285. return [(self.map_tensor_name(name), data_torch)]
  1286. def _reverse_hf_permute(self, weights: Tensor, n_head: int, n_kv_head: int | None = None) -> Tensor:
  1287. if n_kv_head is not None and n_head != n_kv_head:
  1288. n_head //= n_kv_head
  1289. return (
  1290. weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
  1291. .swapaxes(1, 2)
  1292. .reshape(weights.shape)
  1293. )
  1294. @ModelBase.register("FalconForCausalLM", "RWForCausalLM")
  1295. class FalconModel(TextModel):
  1296. model_arch = gguf.MODEL_ARCH.FALCON
  1297. def set_gguf_parameters(self):
  1298. block_count = self.hparams.get("num_hidden_layers")
  1299. if block_count is None:
  1300. block_count = self.hparams["n_layer"] # old name
  1301. n_head = self.hparams.get("num_attention_heads")
  1302. if n_head is None:
  1303. n_head = self.hparams["n_head"] # old name
  1304. n_head_kv = self.hparams.get("num_kv_heads")
  1305. if n_head_kv is None:
  1306. n_head_kv = self.hparams.get("n_head_kv", 1) # old name
  1307. self.gguf_writer.add_context_length(2048) # not in config.json
  1308. self.gguf_writer.add_tensor_data_layout("jploski") # qkv tensor transform
  1309. self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
  1310. self.gguf_writer.add_feed_forward_length(4 * self.hparams["hidden_size"])
  1311. self.gguf_writer.add_block_count(block_count)
  1312. self.gguf_writer.add_head_count(n_head)
  1313. self.gguf_writer.add_head_count_kv(n_head_kv)
  1314. self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
  1315. self.gguf_writer.add_file_type(self.ftype)
  1316. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  1317. del bid # unused
  1318. # QKV tensor transform
  1319. # The original query_key_value tensor contains n_head_kv "kv groups",
  1320. # each consisting of n_head/n_head_kv query weights followed by one key
  1321. # and one value weight (shared by all query heads in the kv group).
  1322. # This layout makes it a big pain to work with in GGML.
  1323. # So we rearrange them here,, so that we have n_head query weights
  1324. # followed by n_head_kv key weights followed by n_head_kv value weights,
  1325. # in contiguous fashion.
  1326. # ref: https://github.com/jploski/ggml/blob/falcon40b/examples/falcon/convert-hf-to-ggml.py
  1327. if "query_key_value" in name:
  1328. n_head = self.find_hparam(["num_attention_heads", "n_head"])
  1329. n_head_kv = self.find_hparam(["num_kv_heads", "n_head_kv"], optional=True) or 1
  1330. head_dim = self.hparams["hidden_size"] // n_head
  1331. qkv = data_torch.view(n_head_kv, n_head // n_head_kv + 2, head_dim, head_dim * n_head)
  1332. q = qkv[:, :-2].reshape(n_head * head_dim, head_dim * n_head)
  1333. k = qkv[:, [-2]].reshape(n_head_kv * head_dim, head_dim * n_head)
  1334. v = qkv[:, [-1]].reshape(n_head_kv * head_dim, head_dim * n_head)
  1335. data_torch = torch.cat((q, k, v)).reshape_as(data_torch)
  1336. return [(self.map_tensor_name(name), data_torch)]
  1337. @ModelBase.register("GPTBigCodeForCausalLM")
  1338. class StarCoderModel(TextModel):
  1339. model_arch = gguf.MODEL_ARCH.STARCODER
  1340. def set_gguf_parameters(self):
  1341. block_count = self.hparams["n_layer"]
  1342. self.gguf_writer.add_context_length(self.hparams["n_positions"])
  1343. self.gguf_writer.add_embedding_length(self.hparams["n_embd"])
  1344. self.gguf_writer.add_feed_forward_length(4 * self.hparams["n_embd"])
  1345. self.gguf_writer.add_block_count(block_count)
  1346. self.gguf_writer.add_head_count(self.hparams["n_head"])
  1347. self.gguf_writer.add_head_count_kv(1)
  1348. self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
  1349. self.gguf_writer.add_file_type(self.ftype)
  1350. @ModelBase.register("GPTRefactForCausalLM")
  1351. class RefactModel(TextModel):
  1352. model_arch = gguf.MODEL_ARCH.REFACT
  1353. def set_vocab(self):
  1354. super().set_vocab()
  1355. # TODO: how to determine special FIM tokens automatically?
  1356. special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=False,
  1357. special_token_types = ['prefix', 'suffix', 'middle', 'eot'])
  1358. special_vocab._set_special_token("prefix", 1)
  1359. special_vocab._set_special_token("suffix", 3)
  1360. special_vocab._set_special_token("middle", 2)
  1361. special_vocab.chat_template = None # do not add it twice
  1362. special_vocab.add_to_gguf(self.gguf_writer)
  1363. def set_gguf_parameters(self):
  1364. hidden_dim = self.hparams["n_embd"]
  1365. inner_dim = 4 * hidden_dim
  1366. hidden_dim = int(2 * inner_dim / 3)
  1367. multiple_of = 256
  1368. ff_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of)
  1369. block_count = self.hparams["n_layer"]
  1370. # refact uses Alibi. So this is from config.json which might be used by training.
  1371. self.gguf_writer.add_context_length(self.hparams["n_positions"])
  1372. self.gguf_writer.add_embedding_length(self.hparams["n_embd"])
  1373. self.gguf_writer.add_feed_forward_length(ff_dim)
  1374. self.gguf_writer.add_block_count(block_count)
  1375. self.gguf_writer.add_head_count(self.hparams["n_head"])
  1376. self.gguf_writer.add_head_count_kv(1)
  1377. self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"])
  1378. self.gguf_writer.add_file_type(self.ftype)
  1379. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  1380. hidden_dim = self.hparams["n_embd"]
  1381. inner_dim = 4 * hidden_dim
  1382. hidden_dim = int(2 * inner_dim / 3)
  1383. multiple_of = 256
  1384. ff_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of)
  1385. n_head = self.hparams["n_head"]
  1386. n_head_kv = 1
  1387. head_dim = self.hparams["n_embd"] // n_head
  1388. tensors: list[tuple[str, Tensor]] = []
  1389. if bid is not None:
  1390. if name == f"transformer.h.{bid}.attn.kv.weight":
  1391. tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid), data_torch[:n_head_kv * head_dim]))
  1392. tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid), data_torch[n_head_kv * head_dim:]))
  1393. elif name == f"transformer.h.{bid}.attn.q.weight":
  1394. tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid), data_torch))
  1395. elif name == f"transformer.h.{bid}.mlp.gate_up_proj.weight":
  1396. tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE, bid), data_torch[:ff_dim]))
  1397. tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP, bid), data_torch[ff_dim:]))
  1398. if len(tensors) == 0:
  1399. tensors.append((self.map_tensor_name(name), data_torch))
  1400. return tensors
  1401. @ModelBase.register("StableLmForCausalLM", "StableLMEpochForCausalLM", "LlavaStableLMEpochForCausalLM")
  1402. class StableLMModel(TextModel):
  1403. model_arch = gguf.MODEL_ARCH.STABLELM
  1404. def set_vocab(self):
  1405. if (self.dir_model / "tokenizer.json").is_file():
  1406. self._set_vocab_gpt2()
  1407. else:
  1408. # StableLM 2 1.6B used to have a vocab in a similar format to Qwen's vocab
  1409. self._set_vocab_qwen()
  1410. def set_gguf_parameters(self):
  1411. hparams = self.hparams
  1412. block_count = hparams["num_hidden_layers"]
  1413. self.gguf_writer.add_context_length(hparams["max_position_embeddings"])
  1414. self.gguf_writer.add_embedding_length(hparams["hidden_size"])
  1415. self.gguf_writer.add_block_count(block_count)
  1416. self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
  1417. rotary_factor = self.find_hparam(["partial_rotary_factor", "rope_pct"])
  1418. self.gguf_writer.add_rope_dimension_count(int(rotary_factor * (hparams["hidden_size"] // hparams["num_attention_heads"])))
  1419. self.gguf_writer.add_head_count(hparams["num_attention_heads"])
  1420. self.gguf_writer.add_head_count_kv(hparams["num_key_value_heads"])
  1421. self.gguf_writer.add_parallel_residual(hparams["use_parallel_residual"] if "use_parallel_residual" in hparams else True)
  1422. self.gguf_writer.add_layer_norm_eps(self.find_hparam(["layer_norm_eps", "norm_eps"]))
  1423. self.gguf_writer.add_file_type(self.ftype)
  1424. _q_norms: list[dict[str, Tensor]] | None = None
  1425. _k_norms: list[dict[str, Tensor]] | None = None
  1426. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  1427. n_head = self.hparams["num_attention_heads"]
  1428. n_kv_head = self.hparams["num_key_value_heads"]
  1429. if name.find("q_layernorm.norms") != -1:
  1430. assert bid is not None
  1431. if self._q_norms is None:
  1432. self._q_norms = [{} for _ in range(self.block_count)]
  1433. self._q_norms[bid][name] = data_torch
  1434. if len(self._q_norms[bid]) >= n_head:
  1435. return self._stack_qk_norm(bid, n_head, self._q_norms[bid], "q_layernorm")
  1436. else:
  1437. return []
  1438. if name.find("k_layernorm.norms") != -1:
  1439. assert bid is not None
  1440. if self._k_norms is None:
  1441. self._k_norms = [{} for _ in range(self.block_count)]
  1442. self._k_norms[bid][name] = data_torch
  1443. if len(self._k_norms[bid]) >= n_kv_head:
  1444. return self._stack_qk_norm(bid, n_kv_head, self._k_norms[bid], "k_layernorm")
  1445. else:
  1446. return []
  1447. return [(self.map_tensor_name(name), data_torch)]
  1448. def _stack_qk_norm(self, bid: int, n_head: int, norms: dict[str, Tensor], layer_name: str = "q_layernorm"):
  1449. datas: list[Tensor] = []
  1450. # extract the norms in order
  1451. for xid in range(n_head):
  1452. ename = f"model.layers.{bid}.self_attn.{layer_name}.norms.{xid}.weight"
  1453. datas.append(norms[ename])
  1454. del norms[ename]
  1455. data_torch = torch.stack(datas, dim=0)
  1456. merged_name = f"model.layers.{bid}.self_attn.{layer_name}.weight"
  1457. new_name = self.map_tensor_name(merged_name)
  1458. return [(new_name, data_torch)]
  1459. def prepare_tensors(self):
  1460. super().prepare_tensors()
  1461. if self._q_norms is not None or self._k_norms is not None:
  1462. # flatten two `list[dict[str, Tensor]]` into a single `list[str]`
  1463. norms = (
  1464. [k for d in self._q_norms for k in d.keys()] if self._q_norms is not None else []
  1465. ) + (
  1466. [k for d in self._k_norms for k in d.keys()] if self._k_norms is not None else []
  1467. )
  1468. if len(norms) > 0:
  1469. raise ValueError(f"Unprocessed norms: {norms}")
  1470. @ModelBase.register(
  1471. "LLaMAForCausalLM",
  1472. "LlamaForCausalLM",
  1473. "MistralForCausalLM",
  1474. "MixtralForCausalLM",
  1475. "VLlama3ForCausalLM",
  1476. "LlavaForConditionalGeneration")
  1477. class LlamaModel(TextModel):
  1478. model_arch = gguf.MODEL_ARCH.LLAMA
  1479. undo_permute = True
  1480. def __init__(self, *args, **kwargs):
  1481. super().__init__(*args, **kwargs)
  1482. # fix for SmolVLM2, missing `num_attention_heads` in config.json
  1483. if self.hf_arch == "VLlama3ForCausalLM":
  1484. self.hparams["num_attention_heads"] = self.hparams.get("num_attention_heads", 32)
  1485. def set_vocab(self):
  1486. try:
  1487. self._set_vocab_sentencepiece()
  1488. except FileNotFoundError:
  1489. try:
  1490. self._set_vocab_llama_hf()
  1491. except (FileNotFoundError, TypeError):
  1492. # Llama 3
  1493. self._set_vocab_gpt2()
  1494. # Apply to CodeLlama only (and ignore for Llama 3 with a vocab size of 128256)
  1495. if self.hparams.get("vocab_size", 32000) == 32016:
  1496. special_vocab = gguf.SpecialVocab(
  1497. self.dir_model, load_merges=False,
  1498. special_token_types = ['prefix', 'suffix', 'middle', 'eot']
  1499. )
  1500. special_vocab._set_special_token("prefix", 32007)
  1501. special_vocab._set_special_token("suffix", 32008)
  1502. special_vocab._set_special_token("middle", 32009)
  1503. special_vocab._set_special_token("eot", 32010)
  1504. special_vocab.add_to_gguf(self.gguf_writer)
  1505. tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
  1506. if tokenizer_config_file.is_file():
  1507. with open(tokenizer_config_file, "r", encoding="utf-8") as f:
  1508. tokenizer_config_json = json.load(f)
  1509. if "add_prefix_space" in tokenizer_config_json:
  1510. self.gguf_writer.add_add_space_prefix(tokenizer_config_json["add_prefix_space"])
  1511. # Apply to granite small models only
  1512. if self.hparams.get("vocab_size", 32000) == 49152:
  1513. self.gguf_writer.add_add_bos_token(False)
  1514. def set_gguf_parameters(self):
  1515. super().set_gguf_parameters()
  1516. hparams = self.hparams
  1517. self.gguf_writer.add_vocab_size(hparams["vocab_size"])
  1518. if "head_dim" in hparams:
  1519. rope_dim = hparams["head_dim"]
  1520. else:
  1521. rope_dim = hparams["hidden_size"] // hparams["num_attention_heads"]
  1522. self.gguf_writer.add_rope_dimension_count(rope_dim)
  1523. rope_scaling = self.hparams.get("rope_scaling") or {}
  1524. if rope_scaling.get("rope_type", rope_scaling.get("type")) == "linear" and "factor" in rope_scaling:
  1525. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
  1526. self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
  1527. @staticmethod
  1528. def permute(weights: Tensor, n_head: int, n_head_kv: int | None):
  1529. if n_head_kv is not None and n_head != n_head_kv:
  1530. n_head = n_head_kv
  1531. return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
  1532. .swapaxes(1, 2)
  1533. .reshape(weights.shape))
  1534. _experts: list[dict[str, Tensor]] | None = None
  1535. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  1536. n_head = self.hparams["num_attention_heads"]
  1537. n_kv_head = self.hparams.get("num_key_value_heads")
  1538. is_vision_tensor = "vision_tower" in name \
  1539. or "vision_model" in name \
  1540. or "model.connector" in name \
  1541. or "multi_modal_projector" in name
  1542. if is_vision_tensor:
  1543. return [] # skip vision tensors
  1544. elif name.startswith("model.text_model"):
  1545. name = name.replace("text_model.", "") # for SmolVLM
  1546. elif name.startswith("language_model."):
  1547. name = name.replace("language_model.", "") # for the rest
  1548. if self.undo_permute:
  1549. if name.endswith(("q_proj.weight", "q_proj.bias")):
  1550. data_torch = LlamaModel.permute(data_torch, n_head, n_head)
  1551. if name.endswith(("k_proj.weight", "k_proj.bias")):
  1552. data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)
  1553. # process the experts separately
  1554. if name.find("block_sparse_moe.experts") != -1:
  1555. n_experts = self.hparams["num_local_experts"]
  1556. assert bid is not None
  1557. if self._experts is None:
  1558. self._experts = [{} for _ in range(self.block_count)]
  1559. self._experts[bid][name] = data_torch
  1560. if len(self._experts[bid]) >= n_experts * 3:
  1561. tensors: list[tuple[str, Tensor]] = []
  1562. # merge the experts into a single 3d tensor
  1563. for wid in ["w1", "w2", "w3"]:
  1564. datas: list[Tensor] = []
  1565. for xid in range(n_experts):
  1566. ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.{wid}.weight"
  1567. datas.append(self._experts[bid][ename])
  1568. del self._experts[bid][ename]
  1569. data_torch = torch.stack(datas, dim=0)
  1570. merged_name = f"layers.{bid}.feed_forward.experts.{wid}.weight"
  1571. new_name = self.map_tensor_name(merged_name)
  1572. tensors.append((new_name, data_torch))
  1573. return tensors
  1574. else:
  1575. return []
  1576. return [(self.map_tensor_name(name), data_torch)]
  1577. def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
  1578. if rope_scaling := self.find_hparam(["rope_scaling"], optional=True):
  1579. if rope_scaling.get("rope_type", '').lower() == "llama3":
  1580. base = self.hparams.get("rope_theta", 10000.0)
  1581. dim = self.hparams.get("head_dim", self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
  1582. freqs = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim))
  1583. factor = rope_scaling.get("factor", 8.0)
  1584. low_freq_factor = rope_scaling.get("low_freq_factor", 1.0)
  1585. high_freq_factor = rope_scaling.get("high_freq_factor", 4.0)
  1586. old_context_len = self.hparams.get("original_max_position_embeddings", 8192)
  1587. low_freq_wavelen = old_context_len / low_freq_factor
  1588. high_freq_wavelen = old_context_len / high_freq_factor
  1589. # assert low_freq_wavelen != high_freq_wavelen # Errors for Llama4
  1590. rope_factors = []
  1591. for freq in freqs:
  1592. wavelen = 2 * math.pi / freq
  1593. if wavelen < high_freq_wavelen:
  1594. rope_factors.append(1)
  1595. elif wavelen > low_freq_wavelen:
  1596. rope_factors.append(factor)
  1597. else:
  1598. smooth = (old_context_len / wavelen - low_freq_factor) / (high_freq_factor - low_freq_factor)
  1599. rope_factors.append(1 / ((1 - smooth) / factor + smooth))
  1600. yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FREQS), torch.tensor(rope_factors, dtype=torch.float32))
  1601. def prepare_tensors(self):
  1602. super().prepare_tensors()
  1603. if self._experts is not None:
  1604. # flatten `list[dict[str, Tensor]]` into `list[str]`
  1605. experts = [k for d in self._experts for k in d.keys()]
  1606. if len(experts) > 0:
  1607. raise ValueError(f"Unprocessed experts: {experts}")
  1608. @ModelBase.register(
  1609. "LlavaForConditionalGeneration", # pixtral
  1610. "Mistral3ForConditionalGeneration", # mistral small 3.1
  1611. )
  1612. class LlavaVisionModel(VisionModel):
  1613. img_break_tok_id = -1
  1614. def __init__(self, *args, **kwargs):
  1615. super().__init__(*args, **kwargs)
  1616. if self.hparams["model_type"] == "pixtral":
  1617. # layer_norm_eps is not in config.json, it is hard-coded in modeling_pixtral.py
  1618. self.hparams["layer_norm_eps"] = self.hparams.get("layer_norm_eps", 1e-5)
  1619. self.img_break_tok_id = self.get_token_id("[IMG_BREAK]")
  1620. logger.info(f"Image break token id: {self.img_break_tok_id}")
  1621. else:
  1622. raise ValueError(f"Unsupported model type: {self.hparams['model_type']}")
  1623. def get_token_id(self, token: str) -> int:
  1624. tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
  1625. with open(tokenizer_config_file, "r", encoding="utf-8") as f:
  1626. added_tokens_decoder = json.load(f)['added_tokens_decoder']
  1627. for id_, token_data in added_tokens_decoder.items():
  1628. if token_data["content"] == token:
  1629. return int(id_)
  1630. raise ValueError(f"Token '{token}' not found in tokenizer config.")
  1631. def set_gguf_parameters(self):
  1632. super().set_gguf_parameters()
  1633. hparams = self.hparams
  1634. if hparams["model_type"] == "pixtral":
  1635. self.gguf_writer.add_vision_projector_type(gguf.VisionProjectorType.PIXTRAL)
  1636. self.gguf_writer.add_vision_attention_layernorm_eps(hparams["layer_norm_eps"])
  1637. # hidden_act
  1638. if hparams["hidden_act"] == "silu":
  1639. self.gguf_writer.add_vision_use_silu(True)
  1640. elif hparams["hidden_act"] == "gelu":
  1641. self.gguf_writer.add_vision_use_gelu(True)
  1642. else:
  1643. raise ValueError(f"Unsupported hidden_act: {hparams['hidden_act']}")
  1644. # spatial_merge_size
  1645. if "spatial_merge_size" in self.global_config:
  1646. self.gguf_writer.add_vision_spatial_merge_size(self.global_config["spatial_merge_size"])
  1647. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  1648. del bid # unused
  1649. n_head = self.hparams["num_attention_heads"]
  1650. n_kv_head = n_head
  1651. if name.startswith("multi_modal_projector.") or name.startswith("vision_tower."):
  1652. # process vision tensors
  1653. if name.endswith(("q_proj.weight", "q_proj.bias")):
  1654. data_torch = LlamaModel.permute(data_torch, n_head, n_head)
  1655. if name.endswith(("k_proj.weight", "k_proj.bias")):
  1656. data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)
  1657. return [(self.map_tensor_name(name), data_torch)]
  1658. if self.img_break_tok_id > 0 and "embed_tokens.weight" in name:
  1659. logger.info(f"Extracting [IMG_BREAK] token embedding from {name}")
  1660. # for pixtral model, we need to extract the [IMG_BREAK] token embedding
  1661. img_break_embd = data_torch[self.img_break_tok_id]
  1662. name = gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.V_TOK_EMBD_IMG_BREAK]
  1663. return [(self.map_tensor_name(name), img_break_embd)]
  1664. return [] # skip other tensors
  1665. @ModelBase.register("Idefics3ForConditionalGeneration", "SmolVLMForConditionalGeneration")
  1666. class SmolVLMModel(VisionModel):
  1667. def __init__(self, *args, **kwargs):
  1668. super().__init__(*args, **kwargs)
  1669. if self.hparams["model_type"] == "smolvlm_vision":
  1670. # fix for SmolVLM2, missing some keys in config.json
  1671. # default values are taken from transformers code
  1672. self.hparams["hidden_size"] = self.hparams.get("hidden_size", 1152)
  1673. self.hparams["num_attention_heads"] = self.hparams.get("num_attention_heads", 16)
  1674. self.hparams["intermediate_size"] = self.hparams.get("intermediate_size", 3072)
  1675. def set_gguf_parameters(self):
  1676. super().set_gguf_parameters()
  1677. self.gguf_writer.add_vision_projector_type(gguf.VisionProjectorType.IDEFICS3)
  1678. self.gguf_writer.add_vision_attention_layernorm_eps(self.hparams.get("layer_norm_eps", 1e-5))
  1679. self.gguf_writer.add_vision_projector_scale_factor(self.global_config.get("scale_factor", 2))
  1680. self.gguf_writer.add_vision_use_gelu(True)
  1681. def tensor_force_quant(self, name, new_name, bid, n_dims):
  1682. del bid, new_name, n_dims # unused
  1683. if ".embeddings." in name:
  1684. return gguf.GGMLQuantizationType.F32
  1685. return False
  1686. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  1687. del bid # unused
  1688. is_vision_tensor = "vision_tower" in name or "vision_model" in name or "model.connector" in name
  1689. if is_vision_tensor:
  1690. return [(self.map_tensor_name(name), data_torch)]
  1691. return [] # skip other tensors
  1692. @ModelBase.register("Llama4ForConditionalGeneration")
  1693. class Llama4Model(LlamaModel):
  1694. model_arch = gguf.MODEL_ARCH.LLAMA4
  1695. undo_permute = False
  1696. def __init__(self, *args, **kwargs):
  1697. super().__init__(*args, **kwargs)
  1698. # IMPORTANT: the normal "intermediate_size" is renamed to "intermediate_size_mlp", we need to undo this
  1699. self.hparams["intermediate_size_moe"] = self.hparams["intermediate_size"]
  1700. self.hparams["intermediate_size"] = self.hparams["intermediate_size_mlp"]
  1701. def set_vocab(self):
  1702. self._set_vocab_gpt2()
  1703. self.gguf_writer.add_add_bos_token(True)
  1704. def set_gguf_parameters(self):
  1705. super().set_gguf_parameters()
  1706. self.gguf_writer.add_interleave_moe_layer_step(self.hparams["interleave_moe_layer_step"])
  1707. self.gguf_writer.add_expert_feed_forward_length(self.hparams["intermediate_size_moe"])
  1708. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None):
  1709. if name.startswith("language_model."):
  1710. name = name.replace("language_model.", "")
  1711. # split the gate_up into gate and up
  1712. if "gate_up_proj" in name:
  1713. name_up = name.replace("gate_up_proj", "up_proj.weight")
  1714. name_gate = name.replace("gate_up_proj", "gate_proj.weight")
  1715. dim_half = data_torch.shape[-1] // 2
  1716. gate_proj_weight, up_proj_weight = data_torch.transpose(-1, -2).split(dim_half, dim=-2)
  1717. return [
  1718. (self.map_tensor_name(name_gate), gate_proj_weight),
  1719. (self.map_tensor_name(name_up), up_proj_weight)
  1720. ]
  1721. if name.endswith("down_proj"):
  1722. name += ".weight"
  1723. data_torch = data_torch.transpose(-1, -2)
  1724. if "multi_modal_projector" in name or "vision_model" in name:
  1725. return []
  1726. return super().modify_tensors(data_torch, name, bid)
  1727. @ModelBase.register("Mistral3ForConditionalGeneration")
  1728. class Mistral3Model(LlamaModel):
  1729. model_arch = gguf.MODEL_ARCH.LLAMA
  1730. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None):
  1731. name = name.replace("language_model.", "")
  1732. if "multi_modal_projector" in name or "vision_tower" in name:
  1733. return []
  1734. return super().modify_tensors(data_torch, name, bid)
  1735. @ModelBase.register("DeciLMForCausalLM")
  1736. class DeciModel(TextModel):
  1737. model_arch = gguf.MODEL_ARCH.DECI
  1738. @staticmethod
  1739. def _ffn_mult_to_intermediate_size(ffn_mult: float, n_embd: int) -> int:
  1740. # DeciLM-specific code
  1741. intermediate_size = int(2 * ffn_mult * n_embd / 3)
  1742. return DeciModel._find_multiple(intermediate_size, 256)
  1743. @staticmethod
  1744. def _find_multiple(n: int, k: int) -> int:
  1745. # DeciLM-specific code
  1746. if n % k == 0:
  1747. return n
  1748. return n + k - (n % k)
  1749. def __init__(self, *args, **kwargs):
  1750. super().__init__(*args, **kwargs)
  1751. if "block_configs" in self.hparams: # Llama-3_1-Nemotron-51B
  1752. _block_configs: list[dict[str,Any]] = self.hparams["block_configs"]
  1753. assert self.block_count == len(_block_configs)
  1754. self._num_kv_heads = list()
  1755. self._num_heads = list()
  1756. _ffn_multipliers = list()
  1757. # ***linear attention layer***
  1758. # if n_heads_in_group is None and replace_with_linear is True
  1759. # then _num_kv_heads[il] is 0 and _num_heads[il] is num_attention_heads
  1760. # ***attention-free layer***
  1761. # if n_heads_in_group is None and replace_with_linear is False
  1762. # then _num_kv_heads[il] is 0 and _num_heads[il] is 0
  1763. # ***normal attention-layer***
  1764. # if n_heads_in_group is not None, then
  1765. # _num_kv_heads[il] is num_attention_head // n_heads_in_group and
  1766. # _num_heads[il] is num_attention_head
  1767. # ***dummy layer*** for nemotron 253B
  1768. # if n_heads_in_group is None and ffn_mult is None
  1769. # then _num_kv_heads[il] is 0 and _num_heads[il] is 0 and _ffn_dims is 0
  1770. for il in range(len(_block_configs)):
  1771. if _block_configs[il]["attention"]["n_heads_in_group"] is None:
  1772. if _block_configs[il]["attention"]["replace_with_linear"] is True:
  1773. self._num_kv_heads.append(0)
  1774. self._num_heads.append(self.hparams["num_attention_heads"])
  1775. else:
  1776. self._num_kv_heads.append(0)
  1777. self._num_heads.append(0)
  1778. else:
  1779. self._num_kv_heads.append(self.hparams["num_attention_heads"] // _block_configs[il]["attention"]["n_heads_in_group"])
  1780. self._num_heads.append(self.hparams["num_attention_heads"])
  1781. if _block_configs[il]["ffn"]["ffn_mult"] is None: # dummy layer
  1782. _ffn_multipliers.append(0.0)
  1783. else:
  1784. _ffn_multipliers.append(_block_configs[il]["ffn"]["ffn_mult"])
  1785. assert self.block_count == len(self._num_kv_heads)
  1786. assert self.block_count == len(self._num_heads)
  1787. assert self.block_count == len(_ffn_multipliers)
  1788. assert isinstance(self._num_kv_heads, list) and isinstance(self._num_kv_heads[0], int)
  1789. assert isinstance(self._num_heads, list) and isinstance(self._num_heads[0], int)
  1790. assert isinstance(_ffn_multipliers, list) and isinstance(_ffn_multipliers[0], float)
  1791. self._ffn_dims: list[int] = [
  1792. DeciModel._ffn_mult_to_intermediate_size(multiplier, self.hparams["hidden_size"])
  1793. for multiplier in _ffn_multipliers
  1794. ]
  1795. def set_vocab(self):
  1796. # Please change tokenizer_config.json of Llama-3_1-Nemotron-51B's
  1797. # eos_token from '|eot_id|' to '|end_of_text|'
  1798. if self.hparams.get("vocab_size", 128256) == 128256:
  1799. tokens, toktypes, tokpre = self.get_vocab_base()
  1800. self.gguf_writer.add_tokenizer_model("gpt2")
  1801. self.gguf_writer.add_tokenizer_pre(tokpre)
  1802. self.gguf_writer.add_token_list(tokens)
  1803. self.gguf_writer.add_token_types(toktypes)
  1804. special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=True)
  1805. special_vocab.add_to_gguf(self.gguf_writer)
  1806. else:
  1807. # DeciLM-7B
  1808. self._set_vocab_llama_hf()
  1809. def set_gguf_parameters(self):
  1810. if "block_configs" in self.hparams: # Llama-3_1-Nemotron-51B
  1811. assert self.block_count == len(self._num_kv_heads)
  1812. assert self.block_count == len(self._num_heads)
  1813. assert self.block_count == len(self._ffn_dims)
  1814. if (rope_theta := self.hparams.get("rope_theta")) is not None:
  1815. self.gguf_writer.add_rope_freq_base(rope_theta)
  1816. self.gguf_writer.add_head_count_kv(self._num_kv_heads)
  1817. self.gguf_writer.add_head_count(self._num_heads)
  1818. self.gguf_writer.add_feed_forward_length(self._ffn_dims)
  1819. self.gguf_writer.add_block_count(self.block_count)
  1820. self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"])
  1821. self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
  1822. self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
  1823. self.gguf_writer.add_key_length(self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
  1824. self.gguf_writer.add_value_length(self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
  1825. self.gguf_writer.add_file_type(self.ftype)
  1826. else: # DeciLM-7B
  1827. super().set_gguf_parameters()
  1828. if "num_key_value_heads_per_layer" in self.hparams: # DeciLM-7B
  1829. self._num_kv_heads: list[int] = self.hparams["num_key_value_heads_per_layer"]
  1830. assert self.block_count == len(self._num_kv_heads)
  1831. self.gguf_writer.add_head_count_kv(self._num_kv_heads)
  1832. hparams = self.hparams
  1833. self.gguf_writer.add_vocab_size(hparams["vocab_size"])
  1834. if "head_dim" in hparams:
  1835. rope_dim = hparams["head_dim"]
  1836. else:
  1837. rope_dim = hparams["hidden_size"] // hparams["num_attention_heads"]
  1838. self.gguf_writer.add_rope_dimension_count(rope_dim)
  1839. rope_scaling = self.hparams.get("rope_scaling") or {}
  1840. if rope_scaling.get("rope_type", rope_scaling.get("type")) == "linear" and "factor" in rope_scaling:
  1841. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
  1842. self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
  1843. @staticmethod
  1844. def permute(weights: Tensor, n_head: int, n_head_kv: int | None):
  1845. if n_head_kv is not None and n_head != n_head_kv:
  1846. n_head = n_head_kv
  1847. return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
  1848. .swapaxes(1, 2)
  1849. .reshape(weights.shape))
  1850. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  1851. n_head = self.hparams["num_attention_heads"]
  1852. if bid is not None:
  1853. if "num_key_value_heads_per_layer" in self.hparams:
  1854. n_kv_head = self.hparams["num_key_value_heads_per_layer"][bid]
  1855. elif "block_configs" in self.hparams:
  1856. n_kv_head = self._num_kv_heads[bid]
  1857. n_head = self._num_heads[bid]
  1858. else:
  1859. n_kv_head = self.hparams.get("num_key_value_heads")
  1860. else:
  1861. n_kv_head = self.hparams.get("num_key_value_heads")
  1862. if name.endswith(("q_proj.weight", "q_proj.bias")):
  1863. data_torch = DeciModel.permute(data_torch, n_head, n_head)
  1864. if name.endswith(("k_proj.weight", "k_proj.bias")):
  1865. data_torch = DeciModel.permute(data_torch, n_head, n_kv_head)
  1866. return [(self.map_tensor_name(name), data_torch)]
  1867. def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
  1868. if rope_scaling := self.find_hparam(["rope_scaling"], optional=True):
  1869. if rope_scaling.get("rope_type", '').lower() == "llama3":
  1870. base = self.hparams.get("rope_theta", 10000.0)
  1871. dim = self.hparams.get("head_dim", self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
  1872. freqs = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim))
  1873. factor = rope_scaling.get("factor", 8.0)
  1874. low_freq_factor = rope_scaling.get("low_freq_factor", 1.0)
  1875. high_freq_factor = rope_scaling.get("high_freq_factor", 4.0)
  1876. old_context_len = self.hparams.get("original_max_position_embeddings", 8192)
  1877. low_freq_wavelen = old_context_len / low_freq_factor
  1878. high_freq_wavelen = old_context_len / high_freq_factor
  1879. assert low_freq_wavelen != high_freq_wavelen
  1880. rope_factors = []
  1881. for freq in freqs:
  1882. wavelen = 2 * math.pi / freq
  1883. if wavelen < high_freq_wavelen:
  1884. rope_factors.append(1)
  1885. elif wavelen > low_freq_wavelen:
  1886. rope_factors.append(factor)
  1887. else:
  1888. smooth = (old_context_len / wavelen - low_freq_factor) / (high_freq_factor - low_freq_factor)
  1889. rope_factors.append(1 / ((1 - smooth) / factor + smooth))
  1890. yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FREQS), torch.tensor(rope_factors, dtype=torch.float32))
  1891. def prepare_tensors(self):
  1892. super().prepare_tensors()
  1893. @ModelBase.register("BitnetForCausalLM")
  1894. class BitnetModel(TextModel):
  1895. model_arch = gguf.MODEL_ARCH.BITNET
  1896. def set_vocab(self):
  1897. self._set_vocab_sentencepiece()
  1898. def set_gguf_parameters(self):
  1899. super().set_gguf_parameters()
  1900. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
  1901. self.gguf_writer.add_rope_scaling_factor(1.0)
  1902. def weight_quant(self, weight: Tensor) -> Tensor:
  1903. dtype = weight.dtype
  1904. weight = weight.float()
  1905. scale = weight.abs().mean().clamp(min=1e-5)
  1906. iscale = 1 / scale
  1907. # TODO: multiply by the scale directly instead of inverting it twice
  1908. # (this is also unnecessarily doubly inverted upstream)
  1909. # ref: https://huggingface.co/1bitLLM/bitnet_b1_58-3B/blob/af89e318d78a70802061246bf037199d2fb97020/utils_quant.py#L10
  1910. result = (weight * iscale).round().clamp(-1, 1) / iscale
  1911. return result.type(dtype)
  1912. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  1913. new_name = self.map_tensor_name(name)
  1914. if any(self.match_model_tensor_name(new_name, key, bid) for key in [
  1915. gguf.MODEL_TENSOR.ATTN_Q,
  1916. gguf.MODEL_TENSOR.ATTN_K,
  1917. gguf.MODEL_TENSOR.ATTN_V,
  1918. gguf.MODEL_TENSOR.ATTN_OUT,
  1919. gguf.MODEL_TENSOR.FFN_UP,
  1920. gguf.MODEL_TENSOR.FFN_DOWN,
  1921. gguf.MODEL_TENSOR.FFN_GATE,
  1922. ]):
  1923. # transform weight into 1/0/-1 (in fp32)
  1924. data_torch = self.weight_quant(data_torch)
  1925. yield (new_name, data_torch)
  1926. @ModelBase.register("GrokForCausalLM")
  1927. class GrokModel(TextModel):
  1928. model_arch = gguf.MODEL_ARCH.GROK
  1929. def set_vocab(self):
  1930. self._set_vocab_sentencepiece()
  1931. def __init__(self, *args, **kwargs):
  1932. super().__init__(*args, **kwargs)
  1933. def set_gguf_parameters(self):
  1934. super().set_gguf_parameters()
  1935. _experts: list[dict[str, Tensor]] | None = None
  1936. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  1937. # process the experts separately
  1938. if name.find(".moe.") != -1:
  1939. n_experts = self.hparams["num_local_experts"]
  1940. assert bid is not None
  1941. if self._experts is None:
  1942. self._experts = [{} for _ in range(self.block_count)]
  1943. self._experts[bid][name] = data_torch
  1944. if len(self._experts[bid]) >= n_experts * 3:
  1945. tensors: list[tuple[str, Tensor]] = []
  1946. # merge the experts into a single 3d tensor
  1947. for wid in ["linear", "linear_1", "linear_v"]:
  1948. datas: list[Tensor] = []
  1949. for xid in range(n_experts):
  1950. ename = f"transformer.decoder_layer.{bid}.moe.{xid}.{wid}.weight"
  1951. datas.append(self._experts[bid][ename])
  1952. del self._experts[bid][ename]
  1953. data_torch = torch.stack(datas, dim=0)
  1954. merged_name = f"transformer.decoder_layer.{bid}.moe.{wid}.weight"
  1955. new_name = self.map_tensor_name(merged_name)
  1956. tensors.append((new_name, data_torch))
  1957. return tensors
  1958. else:
  1959. return []
  1960. return [(self.map_tensor_name(name), data_torch)]
  1961. @ModelBase.register("DbrxForCausalLM")
  1962. class DbrxModel(TextModel):
  1963. model_arch = gguf.MODEL_ARCH.DBRX
  1964. def set_gguf_parameters(self):
  1965. ffn_config = self.hparams["ffn_config"]
  1966. attn_config = self.hparams["attn_config"]
  1967. self.gguf_writer.add_block_count(self.hparams["n_layers"])
  1968. self.gguf_writer.add_context_length(self.hparams["max_seq_len"])
  1969. self.gguf_writer.add_embedding_length(self.hparams["d_model"])
  1970. self.gguf_writer.add_feed_forward_length(ffn_config["ffn_hidden_size"])
  1971. self.gguf_writer.add_head_count(self.hparams["n_heads"])
  1972. self.gguf_writer.add_head_count_kv(attn_config["kv_n_heads"])
  1973. self.gguf_writer.add_rope_freq_base(attn_config["rope_theta"])
  1974. self.gguf_writer.add_clamp_kqv(attn_config["clip_qkv"])
  1975. self.gguf_writer.add_expert_count(ffn_config["moe_num_experts"])
  1976. self.gguf_writer.add_expert_used_count(ffn_config["moe_top_k"])
  1977. self.gguf_writer.add_layer_norm_eps(1e-5)
  1978. self.gguf_writer.add_file_type(self.ftype)
  1979. logger.info(f"gguf: file type = {self.ftype}")
  1980. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  1981. del bid # unused
  1982. n_expert = self.hparams["ffn_config"]["moe_num_experts"]
  1983. n_ff = self.hparams["ffn_config"]["ffn_hidden_size"]
  1984. n_embd = self.hparams["d_model"]
  1985. # Specific behavior for experts tensors: suffix .weight, view as 3D and transpose
  1986. # original implementation expects (n_expert, n_ff, n_embd) for all experts weights
  1987. # But llama.cpp moe graph works differently
  1988. # AND the dimensions in ggml are typically in the reverse order of the pytorch dimensions
  1989. # so (n_expert, n_ff, n_embd) in pytorch is {n_embd, n_ff, n_expert} in ggml_tensor
  1990. exp_tensor_names = {"ffn.experts.mlp.w1": None, # LLM_TENSOR_FFN_GATE_EXPS ggml_tensor->ne{n_embd, n_ff, n_expert}
  1991. "ffn.experts.mlp.w2": (0, 2, 1), # LLM_TENSOR_FFN_DOWN_EXPS ggml_tensor->ne{n_ff, n_embd, n_expert}
  1992. "ffn.experts.mlp.v1": None} # LLM_TENSOR_FFN_UP_EXPS ggml_tensor->ne{n_embd, n_ff, n_expert}
  1993. experts = False
  1994. for exp_tensor_name in exp_tensor_names.keys():
  1995. if name.find(exp_tensor_name) != -1 and name.find(".weight") == -1:
  1996. experts = True
  1997. data_torch = data_torch.view(n_expert, n_ff, n_embd)
  1998. if (permute_tensor := exp_tensor_names[exp_tensor_name]) is not None:
  1999. data_torch = data_torch.permute(*permute_tensor)
  2000. break
  2001. # map tensor names
  2002. # In MoE models the ffn tensors are typically most of the model weights,
  2003. # and need to be quantizable. Quantize expects tensor names to be suffixed by .weight.
  2004. # Every other model has the weight names ending in .weight,
  2005. # let's assume that is the convention which is not the case for dbrx:
  2006. # https://huggingface.co/databricks/dbrx-instruct/blob/main/model.safetensors.index.json#L15
  2007. new_name = self.map_tensor_name(name if not experts else name + ".weight", try_suffixes=(".weight",))
  2008. return [(new_name, data_torch)]
  2009. def tensor_force_quant(self, name: str, new_name: str, bid: int | None, n_dims: int) -> gguf.GGMLQuantizationType | bool:
  2010. del name, new_name, bid # unused
  2011. return n_dims > 1
  2012. @ModelBase.register("MiniCPMForCausalLM")
  2013. class MiniCPMModel(TextModel):
  2014. model_arch = gguf.MODEL_ARCH.MINICPM
  2015. def set_gguf_parameters(self):
  2016. super().set_gguf_parameters()
  2017. embedding_scale = float(self.hparams["scale_emb"])
  2018. self.gguf_writer.add_embedding_scale(embedding_scale)
  2019. logger.info(f"gguf: (minicpm) embedding_scale = {embedding_scale}")
  2020. residual_scale = self.hparams["scale_depth"] / self.hparams["num_hidden_layers"] ** 0.5
  2021. self.gguf_writer.add_residual_scale(residual_scale)
  2022. logger.info(f"gguf: (minicpm) residual_scale = {residual_scale}")
  2023. logit_scale = self.hparams["hidden_size"] / self.hparams["dim_model_base"]
  2024. self.gguf_writer.add_logit_scale(logit_scale)
  2025. logger.info(f"gguf: (minicpm) logit_scale = {logit_scale}")
  2026. rope_scaling = self.hparams.get("rope_scaling") or {}
  2027. if rope_scaling.get("rope_type", rope_scaling.get("type")) == "longrope":
  2028. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LONGROPE)
  2029. logger.info(f"gguf: (minicpm) rope_scaling_type = {gguf.RopeScalingType.LONGROPE}")
  2030. def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
  2031. rope_dims = self.hparams["hidden_size"] // self.hparams["num_attention_heads"]
  2032. rope_scaling = self.find_hparam(['rope_scaling'], True)
  2033. if rope_scaling is not None:
  2034. long_factors = rope_scaling.get('long_factor', None)
  2035. short_factors = rope_scaling.get('short_factor', None)
  2036. if long_factors is None or short_factors is None:
  2037. raise KeyError('Missing the required key rope_scaling.long_factor or rope_scaling_short_factor')
  2038. if len(long_factors) != len(short_factors) or len(long_factors) != rope_dims / 2:
  2039. raise ValueError(f'The length of rope long and short factors must be {rope_dims / 2}')
  2040. yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FACTORS_LONG), torch.tensor(long_factors, dtype=torch.float32))
  2041. yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FACTORS_SHORT), torch.tensor(short_factors, dtype=torch.float32))
  2042. def set_vocab(self):
  2043. self._set_vocab_sentencepiece()
  2044. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  2045. del bid # unused
  2046. n_head = self.hparams["num_attention_heads"]
  2047. n_kv_head = self.hparams.get("num_key_value_heads")
  2048. # HF models permute some of the tensors, so we need to undo that
  2049. if name.endswith(("q_proj.weight")):
  2050. data_torch = LlamaModel.permute(data_torch, n_head, n_head)
  2051. if name.endswith(("k_proj.weight")):
  2052. data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)
  2053. return [(self.map_tensor_name(name), data_torch)]
  2054. @ModelBase.register("MiniCPM3ForCausalLM")
  2055. class MiniCPM3Model(TextModel):
  2056. model_arch = gguf.MODEL_ARCH.MINICPM3
  2057. def set_gguf_parameters(self):
  2058. hparams = self.hparams
  2059. self.gguf_writer.add_file_type(self.ftype)
  2060. self.gguf_writer.add_context_length(hparams["max_position_embeddings"])
  2061. self.gguf_writer.add_embedding_length(hparams["hidden_size"])
  2062. self.gguf_writer.add_block_count(self.block_count)
  2063. self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
  2064. self.gguf_writer.add_head_count(hparams["num_attention_heads"])
  2065. self.gguf_writer.add_head_count_kv(hparams["num_key_value_heads"])
  2066. self.gguf_writer.add_layer_norm_rms_eps(hparams["rms_norm_eps"])
  2067. self.gguf_writer.add_vocab_size(hparams["vocab_size"])
  2068. if "q_lora_rank" in hparams and hparams["q_lora_rank"] is not None:
  2069. self.gguf_writer.add_q_lora_rank(hparams["q_lora_rank"])
  2070. self.gguf_writer.add_kv_lora_rank(hparams["kv_lora_rank"])
  2071. self.gguf_writer.add_key_length(hparams["qk_nope_head_dim"] + hparams["qk_rope_head_dim"])
  2072. self.gguf_writer.add_rope_dimension_count(hparams["qk_rope_head_dim"])
  2073. def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
  2074. rope_scaling = self.find_hparam(['rope_scaling'], True)
  2075. if rope_scaling is not None:
  2076. rope_dims = self.hparams["qk_rope_head_dim"]
  2077. long_factors = rope_scaling.get('long_factor', None)
  2078. short_factors = rope_scaling.get('short_factor', None)
  2079. if long_factors is None or short_factors is None:
  2080. raise KeyError('Missing the required key rope_scaling.long_factor or rope_scaling_short_factor')
  2081. if len(long_factors) != len(short_factors) or len(long_factors) != rope_dims / 2:
  2082. raise ValueError(f'The length of rope long and short factors must be {rope_dims / 2}')
  2083. yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FACTORS_LONG), torch.tensor(long_factors, dtype=torch.float32))
  2084. yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FACTORS_SHORT), torch.tensor(short_factors, dtype=torch.float32))
  2085. def set_vocab(self):
  2086. self._set_vocab_sentencepiece()
  2087. def _reverse_hf_permute(self, weights: Tensor, n_head: int, n_kv_head: int | None = None) -> Tensor:
  2088. if n_kv_head is not None and n_head != n_kv_head:
  2089. n_head //= n_kv_head
  2090. return (
  2091. weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
  2092. .swapaxes(1, 2)
  2093. .reshape(weights.shape)
  2094. )
  2095. @ModelBase.register("QWenLMHeadModel")
  2096. class QwenModel(TextModel):
  2097. model_arch = gguf.MODEL_ARCH.QWEN
  2098. @staticmethod
  2099. def token_bytes_to_string(b):
  2100. from transformers.models.gpt2.tokenization_gpt2 import bytes_to_unicode
  2101. byte_encoder = bytes_to_unicode()
  2102. return ''.join([byte_encoder[ord(char)] for char in b.decode('latin-1')])
  2103. @staticmethod
  2104. def bpe(mergeable_ranks: dict[bytes, int], token: bytes, max_rank: int | None = None) -> list[bytes]:
  2105. parts = [bytes([b]) for b in token]
  2106. while True:
  2107. min_idx = None
  2108. min_rank = None
  2109. for i, pair in enumerate(zip(parts[:-1], parts[1:])):
  2110. rank = mergeable_ranks.get(pair[0] + pair[1])
  2111. if rank is not None and (min_rank is None or rank < min_rank):
  2112. min_idx = i
  2113. min_rank = rank
  2114. if min_rank is None or (max_rank is not None and min_rank >= max_rank):
  2115. break
  2116. assert min_idx is not None
  2117. parts = parts[:min_idx] + [parts[min_idx] + parts[min_idx + 1]] + parts[min_idx + 2:]
  2118. return parts
  2119. def set_vocab(self):
  2120. self._set_vocab_qwen()
  2121. def set_gguf_parameters(self):
  2122. self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"])
  2123. self.gguf_writer.add_block_count(self.hparams["num_hidden_layers"])
  2124. self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
  2125. self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
  2126. self.gguf_writer.add_rope_freq_base(self.hparams["rotary_emb_base"])
  2127. self.gguf_writer.add_rope_dimension_count(self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
  2128. self.gguf_writer.add_head_count(self.hparams["num_attention_heads"])
  2129. self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"])
  2130. self.gguf_writer.add_file_type(self.ftype)
  2131. @ModelBase.register("Qwen2Model", "Qwen2ForCausalLM")
  2132. class Qwen2Model(TextModel):
  2133. model_arch = gguf.MODEL_ARCH.QWEN2
  2134. def set_vocab(self):
  2135. try:
  2136. self._set_vocab_sentencepiece()
  2137. except FileNotFoundError:
  2138. self._set_vocab_gpt2()
  2139. def set_gguf_parameters(self):
  2140. super().set_gguf_parameters()
  2141. self._try_set_pooling_type()
  2142. rope_scaling = self.hparams.get("rope_scaling") or {}
  2143. if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling:
  2144. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
  2145. self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
  2146. self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"])
  2147. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  2148. if self.hf_arch == "Qwen2Model":
  2149. name = f"model.{name}" # map to Qwen2ForCausalLM tensors
  2150. if "language_model." in name:
  2151. name = name.replace("language_model.", "") # for InternVL
  2152. if name.startswith("mlp") or name.startswith("vision_model"):
  2153. # skip visual tensors
  2154. return []
  2155. yield from super().modify_tensors(data_torch, name, bid)
  2156. @ModelBase.register("Qwen2VLForConditionalGeneration", "Qwen2_5_VLForConditionalGeneration")
  2157. class Qwen2VLModel(TextModel):
  2158. model_arch = gguf.MODEL_ARCH.QWEN2VL
  2159. def set_gguf_parameters(self):
  2160. super().set_gguf_parameters()
  2161. mrope_section = self.hparams["rope_scaling"]["mrope_section"]
  2162. mrope_section += [0] * max(0, 4 - len(mrope_section))
  2163. self.gguf_writer.add_rope_dimension_sections(mrope_section)
  2164. def set_vocab(self):
  2165. try:
  2166. self._set_vocab_sentencepiece()
  2167. except FileNotFoundError:
  2168. self._set_vocab_gpt2()
  2169. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  2170. del bid # unused
  2171. if name.startswith("visual."):
  2172. # skip visual tensors
  2173. return []
  2174. return [(self.map_tensor_name(name), data_torch)]
  2175. @ModelBase.register("Qwen2VLForConditionalGeneration", "Qwen2_5_VLForConditionalGeneration")
  2176. class Qwen2VLVisionModel(VisionModel):
  2177. def __init__(self, *args, **kwargs):
  2178. super().__init__(*args, **kwargs)
  2179. self.hparams["image_size"] = self.hparams.get("image_size", 560)
  2180. # rename config.json values
  2181. self.hparams["num_attention_heads"] = self.hparams.get("num_heads")
  2182. self.hparams["num_hidden_layers"] = self.hparams.get("depth")
  2183. if "embed_dim" in self.hparams: # qwen2vl
  2184. self.hparams["intermediate_size"] = self.hparams.get("hidden_size")
  2185. self.hparams["hidden_size"] = self.hparams.get("embed_dim")
  2186. def set_gguf_parameters(self):
  2187. super().set_gguf_parameters()
  2188. hparams = self.hparams
  2189. if self.global_config['model_type'] == 'qwen2_vl':
  2190. self.gguf_writer.add_vision_projector_type(gguf.VisionProjectorType.QWEN2VL)
  2191. elif self.global_config['model_type'] == 'qwen2_5_vl':
  2192. self.gguf_writer.add_vision_projector_type(gguf.VisionProjectorType.QWEN25VL)
  2193. self.gguf_writer.add_vision_use_silu(True)
  2194. # find n_wa_pattern (window attention pattern)
  2195. fullatt_block_indexes = hparams.get("fullatt_block_indexes")
  2196. assert fullatt_block_indexes is not None, "fullatt_block_indexes is required for qwen2_5_vl"
  2197. n_wa_pattern = fullatt_block_indexes[0] + 1
  2198. # validate n_wa_pattern
  2199. for i in range(1, len(fullatt_block_indexes)):
  2200. if fullatt_block_indexes[i] - fullatt_block_indexes[i - 1] != n_wa_pattern:
  2201. raise ValueError(f"Invalid fullatt_block_indexes: {fullatt_block_indexes}")
  2202. self.gguf_writer.add_vision_n_wa_pattern(n_wa_pattern)
  2203. else:
  2204. raise ValueError(f"Unknown QwenVL model type: {self.global_config['model_type']}")
  2205. # default values below are taken from HF tranformers code
  2206. self.gguf_writer.add_vision_attention_layernorm_eps(self.global_config.get("rms_norm_eps", 1e-6))
  2207. def tensor_force_quant(self, name, new_name, bid, n_dims):
  2208. del bid, name, n_dims # unused
  2209. if ".patch_embd." in new_name:
  2210. return gguf.GGMLQuantizationType.F16
  2211. if ".position_embd." in new_name:
  2212. return gguf.GGMLQuantizationType.F32
  2213. return False
  2214. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  2215. del bid # unused
  2216. if name.startswith("visual."):
  2217. # process visual tensors
  2218. # split QKV tensors if needed
  2219. if ".qkv." in name:
  2220. if data_torch.ndim == 2: # weight
  2221. c3, _ = data_torch.shape
  2222. else: # bias
  2223. c3 = data_torch.shape[0]
  2224. assert c3 % 3 == 0
  2225. c = c3 // 3
  2226. wq = data_torch[:c]
  2227. wk = data_torch[c: c * 2]
  2228. wv = data_torch[c * 2:]
  2229. return [
  2230. (self.map_tensor_name(name.replace("qkv", "q")), wq),
  2231. (self.map_tensor_name(name.replace("qkv", "k")), wk),
  2232. (self.map_tensor_name(name.replace("qkv", "v")), wv),
  2233. ]
  2234. elif 'patch_embed.proj.weight' in name:
  2235. # split Conv3D into Conv2Ds
  2236. c1, c2, kt, kh, kw = data_torch.shape
  2237. del c1, c2, kh, kw # unused
  2238. assert kt == 2, "Current implmentation only support temporal_patch_size of 2"
  2239. return [
  2240. (gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.V_ENC_EMBD_PATCH] + ".weight" , data_torch[:, :, 0, ...]),
  2241. (gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.V_ENC_EMBD_PATCH] + ".weight.1", data_torch[:, :, 1, ...]),
  2242. ]
  2243. else:
  2244. return [(self.map_tensor_name(name), data_torch)]
  2245. return [] # skip other tensors
  2246. @ModelBase.register("InternVisionModel")
  2247. class InternVisionModel(VisionModel):
  2248. def set_gguf_parameters(self):
  2249. super().set_gguf_parameters()
  2250. hparams = self.hparams
  2251. self.gguf_writer.add_vision_projector_type(gguf.VisionProjectorType.INTERNVL)
  2252. self.gguf_writer.add_vision_attention_layernorm_eps(hparams["layer_norm_eps"])
  2253. # hidden_act
  2254. if hparams["hidden_act"] == "silu":
  2255. self.gguf_writer.add_vision_use_silu(True)
  2256. elif hparams["hidden_act"] == "gelu":
  2257. self.gguf_writer.add_vision_use_gelu(True)
  2258. else:
  2259. raise ValueError(f"Unsupported hidden_act: {hparams['hidden_act']}")
  2260. # downsample_ratio
  2261. downsample_ratio = self.global_config.get("downsample_ratio")
  2262. assert downsample_ratio is not None
  2263. self.gguf_writer.add_vision_projector_scale_factor(int(1.0 / downsample_ratio))
  2264. def tensor_force_quant(self, name, new_name, bid, n_dims):
  2265. del bid, name, n_dims # unused
  2266. if ".patch_embd." in new_name:
  2267. return gguf.GGMLQuantizationType.F16
  2268. if ".position_embd." in new_name:
  2269. return gguf.GGMLQuantizationType.F32
  2270. return False
  2271. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  2272. del bid # unused
  2273. if name.startswith("vision_model") or name.startswith("mlp"):
  2274. # process visual tensors
  2275. # correct name
  2276. if name.startswith("vision_model"):
  2277. name = "vision_tower." + name
  2278. if (".ls" in name or "position_embedding" in name) and not name.endswith(".weight"):
  2279. name += ".weight"
  2280. # split QKV tensors if needed
  2281. if ".qkv." in name:
  2282. if data_torch.ndim == 2: # weight
  2283. c3, _ = data_torch.shape
  2284. else: # bias
  2285. c3 = data_torch.shape[0]
  2286. assert c3 % 3 == 0
  2287. c = c3 // 3
  2288. wq = data_torch[:c]
  2289. wk = data_torch[c: c * 2]
  2290. wv = data_torch[c * 2:]
  2291. return [
  2292. (self.map_tensor_name(name.replace("attn.qkv", "self_attn.q_proj")), wq),
  2293. (self.map_tensor_name(name.replace("attn.qkv", "self_attn.k_proj")), wk),
  2294. (self.map_tensor_name(name.replace("attn.qkv", "self_attn.v_proj")), wv),
  2295. ]
  2296. return [(self.map_tensor_name(name), data_torch)]
  2297. return [] # skip other tensors
  2298. @ModelBase.register("WavTokenizerDec")
  2299. class WavTokenizerDecModel(TextModel):
  2300. model_arch = gguf.MODEL_ARCH.WAVTOKENIZER_DEC
  2301. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  2302. del bid # unused
  2303. if \
  2304. name.endswith("codebook.cluster_size") or \
  2305. name.endswith("codebook.embed_avg") or \
  2306. name.endswith("codebook.inited"):
  2307. logger.debug(f"Skipping {name!r}")
  2308. return []
  2309. logger.info(f"{self.map_tensor_name(name)} -> {data_torch.shape}")
  2310. return [(self.map_tensor_name(name), data_torch)]
  2311. def set_vocab(self):
  2312. self._set_vocab_none()
  2313. def set_gguf_parameters(self):
  2314. super().set_gguf_parameters()
  2315. self.gguf_writer.add_vocab_size (self.hparams["vocab_size"])
  2316. self.gguf_writer.add_features_length (self.hparams["n_embd_features"])
  2317. self.gguf_writer.add_feed_forward_length(self.hparams["n_ff"])
  2318. self.gguf_writer.add_group_norm_eps (self.hparams["group_norm_epsilon"])
  2319. self.gguf_writer.add_group_norm_groups (self.hparams["group_norm_groups"])
  2320. self.gguf_writer.add_posnet_embedding_length(self.hparams["posnet"]["n_embd"])
  2321. self.gguf_writer.add_posnet_block_count (self.hparams["posnet"]["n_layer"])
  2322. self.gguf_writer.add_convnext_embedding_length(self.hparams["convnext"]["n_embd"])
  2323. self.gguf_writer.add_convnext_block_count (self.hparams["convnext"]["n_layer"])
  2324. self.gguf_writer.add_causal_attention(False)
  2325. @ModelBase.register("Qwen2MoeForCausalLM")
  2326. class Qwen2MoeModel(TextModel):
  2327. model_arch = gguf.MODEL_ARCH.QWEN2MOE
  2328. def set_gguf_parameters(self):
  2329. super().set_gguf_parameters()
  2330. if (n_experts := self.hparams.get("num_experts")) is not None:
  2331. self.gguf_writer.add_expert_count(n_experts)
  2332. if (moe_intermediate_size := self.hparams.get("moe_intermediate_size")) is not None:
  2333. self.gguf_writer.add_expert_feed_forward_length(moe_intermediate_size)
  2334. logger.info(f"gguf: expert feed forward length = {moe_intermediate_size}")
  2335. if (shared_expert_intermediate_size := self.hparams.get('shared_expert_intermediate_size')) is not None:
  2336. self.gguf_writer.add_expert_shared_feed_forward_length(shared_expert_intermediate_size)
  2337. logger.info(f"gguf: expert shared feed forward length = {shared_expert_intermediate_size}")
  2338. # YaRN is not enabled by default
  2339. # To enable it, please refer to this guide: https://huggingface.co/Qwen/Qwen3-30B-A3B#processing-long-texts
  2340. rope_scaling = self.hparams.get("rope_scaling") or {}
  2341. if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling:
  2342. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
  2343. self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
  2344. self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"])
  2345. _experts: list[dict[str, Tensor]] | None = None
  2346. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  2347. # process the experts separately
  2348. if name.find("experts") != -1:
  2349. n_experts = self.hparams["num_experts"]
  2350. assert bid is not None
  2351. if self._experts is None:
  2352. self._experts = [{} for _ in range(self.block_count)]
  2353. self._experts[bid][name] = data_torch
  2354. if len(self._experts[bid]) >= n_experts * 3:
  2355. tensors: list[tuple[str, Tensor]] = []
  2356. # merge the experts into a single 3d tensor
  2357. for w_name in ["down_proj", "gate_proj", "up_proj"]:
  2358. datas: list[Tensor] = []
  2359. for xid in range(n_experts):
  2360. ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
  2361. datas.append(self._experts[bid][ename])
  2362. del self._experts[bid][ename]
  2363. data_torch = torch.stack(datas, dim=0)
  2364. merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"
  2365. new_name = self.map_tensor_name(merged_name)
  2366. tensors.append((new_name, data_torch))
  2367. return tensors
  2368. else:
  2369. return []
  2370. return [(self.map_tensor_name(name), data_torch)]
  2371. def prepare_tensors(self):
  2372. super().prepare_tensors()
  2373. if self._experts is not None:
  2374. # flatten `list[dict[str, Tensor]]` into `list[str]`
  2375. experts = [k for d in self._experts for k in d.keys()]
  2376. if len(experts) > 0:
  2377. raise ValueError(f"Unprocessed experts: {experts}")
  2378. @ModelBase.register("Qwen3ForCausalLM")
  2379. class Qwen3Model(Qwen2Model):
  2380. model_arch = gguf.MODEL_ARCH.QWEN3
  2381. @ModelBase.register("Qwen3MoeForCausalLM")
  2382. class Qwen3MoeModel(Qwen2MoeModel):
  2383. model_arch = gguf.MODEL_ARCH.QWEN3MOE
  2384. @ModelBase.register("GPT2LMHeadModel")
  2385. class GPT2Model(TextModel):
  2386. model_arch = gguf.MODEL_ARCH.GPT2
  2387. def set_gguf_parameters(self):
  2388. self.gguf_writer.add_block_count(self.hparams["n_layer"])
  2389. self.gguf_writer.add_context_length(self.hparams["n_ctx"])
  2390. self.gguf_writer.add_embedding_length(self.hparams["n_embd"])
  2391. self.gguf_writer.add_feed_forward_length(4 * self.hparams["n_embd"])
  2392. self.gguf_writer.add_head_count(self.hparams["n_head"])
  2393. self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
  2394. self.gguf_writer.add_file_type(self.ftype)
  2395. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  2396. del bid # unused
  2397. tensors: list[tuple[str, Tensor]] = []
  2398. # we don't need these
  2399. if name.endswith((".attn.bias", ".attn.masked_bias")):
  2400. return tensors
  2401. if name.endswith((".c_attn.weight", ".c_proj.weight", ".c_fc.weight", ".c_proj.weight")):
  2402. data_torch = data_torch.transpose(1, 0)
  2403. new_name = self.map_tensor_name(name)
  2404. tensors.append((new_name, data_torch))
  2405. return tensors
  2406. @ModelBase.register("PhiForCausalLM")
  2407. class Phi2Model(TextModel):
  2408. model_arch = gguf.MODEL_ARCH.PHI2
  2409. def set_gguf_parameters(self):
  2410. block_count = self.find_hparam(["num_hidden_layers", "n_layer"])
  2411. rot_pct = self.find_hparam(["partial_rotary_factor"])
  2412. n_embd = self.find_hparam(["hidden_size", "n_embd"])
  2413. n_head = self.find_hparam(["num_attention_heads", "n_head"])
  2414. self.gguf_writer.add_context_length(self.find_hparam(["n_positions", "max_position_embeddings"]))
  2415. self.gguf_writer.add_embedding_length(n_embd)
  2416. self.gguf_writer.add_feed_forward_length(4 * n_embd)
  2417. self.gguf_writer.add_block_count(block_count)
  2418. self.gguf_writer.add_head_count(n_head)
  2419. self.gguf_writer.add_head_count_kv(n_head)
  2420. self.gguf_writer.add_layer_norm_eps(self.find_hparam(["layer_norm_epsilon", "layer_norm_eps"]))
  2421. self.gguf_writer.add_rope_dimension_count(int(rot_pct * n_embd) // n_head)
  2422. self.gguf_writer.add_file_type(self.ftype)
  2423. self.gguf_writer.add_add_bos_token(False)
  2424. @ModelBase.register("Phi3ForCausalLM")
  2425. class Phi3MiniModel(TextModel):
  2426. model_arch = gguf.MODEL_ARCH.PHI3
  2427. def set_vocab(self):
  2428. # Phi-4 model uses GPT2Tokenizer
  2429. tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
  2430. if tokenizer_config_file.is_file():
  2431. with open(tokenizer_config_file, "r", encoding="utf-8") as f:
  2432. tokenizer_config_json = json.load(f)
  2433. tokenizer_class = tokenizer_config_json['tokenizer_class']
  2434. if tokenizer_class == 'GPT2Tokenizer':
  2435. return self._set_vocab_gpt2()
  2436. from sentencepiece import SentencePieceProcessor
  2437. tokenizer_path = self.dir_model / 'tokenizer.model'
  2438. if not tokenizer_path.is_file():
  2439. raise ValueError(f'Error: Missing {tokenizer_path}')
  2440. tokenizer = SentencePieceProcessor()
  2441. tokenizer.LoadFromFile(str(tokenizer_path))
  2442. vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())
  2443. tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
  2444. scores: list[float] = [-10000.0] * vocab_size
  2445. toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size
  2446. for token_id in range(tokenizer.vocab_size()):
  2447. piece = tokenizer.IdToPiece(token_id)
  2448. text = piece.encode("utf-8")
  2449. score = tokenizer.GetScore(token_id)
  2450. toktype = SentencePieceTokenTypes.NORMAL
  2451. if tokenizer.IsUnknown(token_id):
  2452. toktype = SentencePieceTokenTypes.UNKNOWN
  2453. elif tokenizer.IsControl(token_id):
  2454. toktype = SentencePieceTokenTypes.CONTROL
  2455. elif tokenizer.IsUnused(token_id):
  2456. toktype = SentencePieceTokenTypes.UNUSED
  2457. elif tokenizer.IsByte(token_id):
  2458. toktype = SentencePieceTokenTypes.BYTE
  2459. tokens[token_id] = text
  2460. scores[token_id] = score
  2461. toktypes[token_id] = toktype
  2462. added_tokens_file = self.dir_model / 'added_tokens.json'
  2463. if added_tokens_file.is_file():
  2464. with open(added_tokens_file, "r", encoding="utf-8") as f:
  2465. added_tokens_json = json.load(f)
  2466. for key in added_tokens_json:
  2467. token_id = added_tokens_json[key]
  2468. if token_id >= vocab_size:
  2469. logger.debug(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
  2470. continue
  2471. tokens[token_id] = key.encode("utf-8")
  2472. scores[token_id] = -1000.0
  2473. toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
  2474. tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
  2475. if tokenizer_config_file.is_file():
  2476. with open(tokenizer_config_file, "r", encoding="utf-8") as f:
  2477. tokenizer_config_json = json.load(f)
  2478. added_tokens_decoder = tokenizer_config_json.get("added_tokens_decoder", {})
  2479. for token_id, foken_data in added_tokens_decoder.items():
  2480. token_id = int(token_id)
  2481. token = foken_data["content"].encode("utf-8")
  2482. if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
  2483. if tokens[token_id] != token:
  2484. logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token.decode("utf-8")!r}')
  2485. tokens[token_id] = token
  2486. scores[token_id] = -1000.0
  2487. toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
  2488. if foken_data.get("special"):
  2489. toktypes[token_id] = SentencePieceTokenTypes.CONTROL
  2490. tokenizer_file = self.dir_model / 'tokenizer.json'
  2491. if tokenizer_file.is_file():
  2492. with open(tokenizer_file, "r", encoding="utf-8") as f:
  2493. tokenizer_json = json.load(f)
  2494. added_tokens = tokenizer_json.get("added_tokens", [])
  2495. for foken_data in added_tokens:
  2496. token_id = int(foken_data["id"])
  2497. token = foken_data["content"].encode("utf-8")
  2498. if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
  2499. if tokens[token_id] != token:
  2500. logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token.decode("utf-8")!r}')
  2501. tokens[token_id] = token
  2502. scores[token_id] = -1000.0
  2503. toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
  2504. if foken_data.get("special"):
  2505. toktypes[token_id] = SentencePieceTokenTypes.CONTROL
  2506. self.gguf_writer.add_tokenizer_model("llama")
  2507. self.gguf_writer.add_tokenizer_pre("default")
  2508. self.gguf_writer.add_token_list(tokens)
  2509. self.gguf_writer.add_token_scores(scores)
  2510. self.gguf_writer.add_token_types(toktypes)
  2511. special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
  2512. special_vocab.add_to_gguf(self.gguf_writer)
  2513. def set_gguf_parameters(self):
  2514. block_count = self.find_hparam(["num_hidden_layers", "n_layer"])
  2515. n_embd = self.find_hparam(["hidden_size", "n_embd"])
  2516. n_head = self.find_hparam(["num_attention_heads", "n_head"])
  2517. n_head_kv = self.find_hparam(["num_key_value_heads", "n_head_kv"])
  2518. rms_eps = self.find_hparam(["rms_norm_eps"])
  2519. max_pos_embds = self.find_hparam(["n_positions", "max_position_embeddings"])
  2520. orig_max_pos_embds = self.find_hparam(["original_max_position_embeddings"])
  2521. rot_pct = self.hparams.get("partial_rotary_factor", 1.0)
  2522. rope_dims = int(rot_pct * n_embd) // n_head
  2523. self.gguf_writer.add_context_length(max_pos_embds)
  2524. self.gguf_writer.add_rope_scaling_orig_ctx_len(orig_max_pos_embds)
  2525. self.gguf_writer.add_embedding_length(n_embd)
  2526. self.gguf_writer.add_feed_forward_length(self.find_hparam(["intermediate_size"]))
  2527. self.gguf_writer.add_block_count(block_count)
  2528. self.gguf_writer.add_head_count(n_head)
  2529. self.gguf_writer.add_head_count_kv(n_head_kv)
  2530. self.gguf_writer.add_layer_norm_rms_eps(rms_eps)
  2531. self.gguf_writer.add_rope_dimension_count(rope_dims)
  2532. self.gguf_writer.add_rope_freq_base(self.find_hparam(["rope_theta"]))
  2533. self.gguf_writer.add_file_type(self.ftype)
  2534. sliding_window = self.hparams.get("sliding_window")
  2535. # use zero value of sliding_window to distinguish Phi-4 from other PHI3 models
  2536. if sliding_window is None:
  2537. sliding_window = 0
  2538. self.gguf_writer.add_sliding_window(sliding_window)
  2539. def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
  2540. n_embd = self.find_hparam(["hidden_size", "n_embd"])
  2541. n_head = self.find_hparam(["num_attention_heads", "n_head"])
  2542. max_pos_embds = self.find_hparam(["n_positions", "max_position_embeddings"])
  2543. orig_max_pos_embds = self.find_hparam(["original_max_position_embeddings"])
  2544. rot_pct = self.hparams.get("partial_rotary_factor", 1.0)
  2545. rope_dims = int(rot_pct * n_embd) // n_head
  2546. # write rope scaling for long context (128k) model
  2547. rope_scaling = self.find_hparam(['rope_scaling'], True)
  2548. if rope_scaling is None:
  2549. return
  2550. scale = max_pos_embds / orig_max_pos_embds
  2551. rope_scaling_type = rope_scaling.get('rope_type', rope_scaling.get('type', '')).lower()
  2552. if len(rope_scaling_type) == 0:
  2553. raise KeyError('Missing the required key rope_scaling.type')
  2554. if rope_scaling_type == 'su' or rope_scaling_type == 'longrope':
  2555. attn_factor = math.sqrt(1 + math.log(scale) / math.log(orig_max_pos_embds)) if scale > 1.0 else 1.0
  2556. elif rope_scaling_type == 'yarn':
  2557. attn_factor = 0.1 * math.log(scale) + 1.0 if scale > 1.0 else 1.0
  2558. else:
  2559. raise NotImplementedError(f'The rope scaling type {rope_scaling_type} is not supported yet')
  2560. self.gguf_writer.add_rope_scaling_attn_factors(attn_factor)
  2561. long_factors = rope_scaling.get('long_factor', None)
  2562. short_factors = rope_scaling.get('short_factor', None)
  2563. if long_factors is None or short_factors is None:
  2564. raise KeyError('Missing the required key rope_scaling.long_factor or rope_scaling_short_factor')
  2565. if len(long_factors) != len(short_factors) or len(long_factors) != rope_dims / 2:
  2566. raise ValueError(f'The length of rope long and short factors must be {rope_dims / 2}. long_factors = {len(long_factors)}, short_factors = {len(short_factors)}.')
  2567. yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FACTORS_LONG), torch.tensor(long_factors, dtype=torch.float32))
  2568. yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FACTORS_SHORT), torch.tensor(short_factors, dtype=torch.float32))
  2569. @ModelBase.register("PhiMoEForCausalLM")
  2570. class PhiMoeModel(Phi3MiniModel):
  2571. model_arch = gguf.MODEL_ARCH.PHIMOE
  2572. _experts: list[dict[str, Tensor]] | None = None
  2573. def set_gguf_parameters(self):
  2574. super().set_gguf_parameters()
  2575. self.gguf_writer.add_expert_used_count(self.hparams["num_experts_per_tok"])
  2576. self.gguf_writer.add_expert_count(self.hparams["num_local_experts"])
  2577. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  2578. # process the experts separately
  2579. if name.find("block_sparse_moe.experts") != -1:
  2580. n_experts = self.hparams["num_local_experts"]
  2581. assert bid is not None
  2582. if self._experts is None:
  2583. self._experts = [{} for _ in range(self.block_count)]
  2584. self._experts[bid][name] = data_torch
  2585. if len(self._experts[bid]) >= n_experts * 3:
  2586. tensors: list[tuple[str, Tensor]] = []
  2587. # merge the experts into a single 3d tensor
  2588. for w_name in ["w1", "w2", "w3"]:
  2589. datas: list[Tensor] = []
  2590. for xid in range(n_experts):
  2591. ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.{w_name}.weight"
  2592. datas.append(self._experts[bid][ename])
  2593. del self._experts[bid][ename]
  2594. data_torch = torch.stack(datas, dim=0)
  2595. merged_name = f"model.layers.{bid}.block_sparse_moe.experts.{w_name}.weight"
  2596. new_name = self.map_tensor_name(merged_name)
  2597. tensors.append((new_name, data_torch))
  2598. return tensors
  2599. else:
  2600. return []
  2601. return [(self.map_tensor_name(name), data_torch)]
  2602. def prepare_tensors(self):
  2603. super().prepare_tensors()
  2604. if self._experts is not None:
  2605. # flatten `list[dict[str, Tensor]]` into `list[str]`
  2606. experts = [k for d in self._experts for k in d.keys()]
  2607. if len(experts) > 0:
  2608. raise ValueError(f"Unprocessed experts: {experts}")
  2609. @ModelBase.register("PlamoForCausalLM")
  2610. class PlamoModel(TextModel):
  2611. model_arch = gguf.MODEL_ARCH.PLAMO
  2612. def set_vocab(self):
  2613. self._set_vocab_sentencepiece()
  2614. def set_gguf_parameters(self):
  2615. hparams = self.hparams
  2616. block_count = hparams["num_hidden_layers"]
  2617. self.gguf_writer.add_context_length(4096) # not in config.json
  2618. self.gguf_writer.add_embedding_length(hparams["hidden_size"])
  2619. self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
  2620. self.gguf_writer.add_block_count(block_count)
  2621. self.gguf_writer.add_head_count(hparams["num_attention_heads"])
  2622. self.gguf_writer.add_head_count_kv(5) # hparams["num_key_value_heads"]) is wrong
  2623. self.gguf_writer.add_layer_norm_rms_eps(hparams["rms_norm_eps"])
  2624. self.gguf_writer.add_file_type(self.ftype)
  2625. def shuffle_attn_q_weight(self, data_torch):
  2626. assert data_torch.size() == (5120, 5120)
  2627. data_torch = data_torch.reshape(8, 5, 128, 5120)
  2628. data_torch = torch.permute(data_torch, (1, 0, 2, 3))
  2629. data_torch = torch.reshape(data_torch, (5120, 5120))
  2630. return data_torch
  2631. def shuffle_attn_output_weight(self, data_torch):
  2632. assert data_torch.size() == (5120, 5120)
  2633. data_torch = data_torch.reshape(5120, 8, 5, 128)
  2634. data_torch = torch.permute(data_torch, (0, 2, 1, 3))
  2635. data_torch = torch.reshape(data_torch, (5120, 5120))
  2636. return data_torch
  2637. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  2638. del bid # unused
  2639. new_name = self.map_tensor_name(name)
  2640. # shuffle for broadcasting of gqa in ggml_mul_mat
  2641. if new_name.endswith("attn_q.weight"):
  2642. data_torch = self.shuffle_attn_q_weight(data_torch)
  2643. elif new_name.endswith("attn_output.weight"):
  2644. data_torch = self.shuffle_attn_output_weight(data_torch)
  2645. return [(new_name, data_torch)]
  2646. @ModelBase.register("CodeShellForCausalLM")
  2647. class CodeShellModel(TextModel):
  2648. model_arch = gguf.MODEL_ARCH.CODESHELL
  2649. def set_gguf_parameters(self):
  2650. block_count = self.hparams["n_layer"]
  2651. self.gguf_writer.add_context_length(self.hparams["n_positions"])
  2652. self.gguf_writer.add_embedding_length(self.hparams["n_embd"])
  2653. self.gguf_writer.add_feed_forward_length(4 * self.hparams["n_embd"])
  2654. self.gguf_writer.add_block_count(block_count)
  2655. self.gguf_writer.add_head_count(self.hparams["n_head"])
  2656. self.gguf_writer.add_head_count_kv(self.hparams["num_query_groups"])
  2657. self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
  2658. self.gguf_writer.add_file_type(self.ftype)
  2659. self.gguf_writer.add_rope_freq_base(10000.0)
  2660. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
  2661. self.gguf_writer.add_rope_scaling_factor(1.0)
  2662. _has_tok_embd = False
  2663. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  2664. del bid # unused
  2665. output_name = self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT)
  2666. tok_embd_name = self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD)
  2667. new_name = self.map_tensor_name(name)
  2668. # assuming token_embd.weight is seen before output.weight
  2669. if not self._has_tok_embd and new_name == self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT):
  2670. # even though the tensor file(s) does not contain the word embeddings they are still in the weight map
  2671. if self.tensor_names and "transformer.wte.weight" in self.tensor_names:
  2672. logger.debug(f"{tok_embd_name} not found before {output_name}, assuming they are tied")
  2673. self.tensor_names.remove("transformer.wte.weight")
  2674. elif new_name == tok_embd_name:
  2675. self._has_tok_embd = True
  2676. return [(new_name, data_torch)]
  2677. @ModelBase.register("InternLM2ForCausalLM")
  2678. class InternLM2Model(TextModel):
  2679. model_arch = gguf.MODEL_ARCH.INTERNLM2
  2680. def set_vocab(self):
  2681. # (TODO): Is there a better way?
  2682. # Copy from _set_vocab_sentencepiece, The only difference is that we will treat the character
  2683. # \x00 specially and convert it into an emoji character to prevent it from being mistakenly
  2684. # recognized as an empty string in C++.
  2685. from sentencepiece import SentencePieceProcessor
  2686. from sentencepiece import sentencepiece_model_pb2 as model
  2687. tokenizer_path = self.dir_model / 'tokenizer.model'
  2688. tokens: list[bytes] = []
  2689. scores: list[float] = []
  2690. toktypes: list[int] = []
  2691. if not tokenizer_path.is_file():
  2692. logger.error(f'Error: Missing {tokenizer_path}')
  2693. sys.exit(1)
  2694. sentencepiece_model = model.ModelProto() # pyright: ignore[reportAttributeAccessIssue]
  2695. sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read())
  2696. add_prefix = sentencepiece_model.normalizer_spec.add_dummy_prefix
  2697. tokenizer = SentencePieceProcessor()
  2698. tokenizer.LoadFromFile(str(tokenizer_path))
  2699. vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())
  2700. for token_id in range(vocab_size):
  2701. piece = tokenizer.IdToPiece(token_id)
  2702. text = piece.encode("utf-8")
  2703. score = tokenizer.GetScore(token_id)
  2704. if text == b"\x00":
  2705. # (TODO): fixme
  2706. # Hack here and replace the \x00 characters.
  2707. logger.warning(f"InternLM2 convert token '{text}' to '🐉'!")
  2708. text = "🐉".encode("utf-8")
  2709. toktype = SentencePieceTokenTypes.NORMAL
  2710. if tokenizer.IsUnknown(token_id):
  2711. toktype = SentencePieceTokenTypes.UNKNOWN
  2712. elif tokenizer.IsControl(token_id):
  2713. toktype = SentencePieceTokenTypes.CONTROL
  2714. elif tokenizer.IsUnused(token_id):
  2715. toktype = SentencePieceTokenTypes.UNUSED
  2716. elif tokenizer.IsByte(token_id):
  2717. toktype = SentencePieceTokenTypes.BYTE
  2718. # take care of ununsed raw token
  2719. if piece.startswith('[UNUSED'):
  2720. toktype = SentencePieceTokenTypes.UNUSED
  2721. tokens.append(text)
  2722. scores.append(score)
  2723. toktypes.append(toktype)
  2724. added_tokens_file = self.dir_model / 'added_tokens.json'
  2725. if added_tokens_file.is_file():
  2726. with open(added_tokens_file, "r", encoding="utf-8") as f:
  2727. added_tokens_json = json.load(f)
  2728. for key in added_tokens_json:
  2729. tokens.append(key.encode("utf-8"))
  2730. scores.append(-1000.0)
  2731. toktypes.append(SentencePieceTokenTypes.USER_DEFINED)
  2732. chat_eos_token = '<|im_end|>'
  2733. chat_eos_token_id = None
  2734. tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
  2735. if tokenizer_config_file.is_file():
  2736. with open(tokenizer_config_file, "r", encoding="utf-8") as f:
  2737. tokenizer_config_json = json.load(f)
  2738. added_tokens_decoder = tokenizer_config_json.get("added_tokens_decoder", {})
  2739. for token_id, foken_data in added_tokens_decoder.items():
  2740. token_id = int(token_id)
  2741. token = foken_data["content"]
  2742. if token == chat_eos_token:
  2743. chat_eos_token_id = token_id
  2744. token = token.encode("utf-8")
  2745. if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
  2746. if tokens[token_id] != token:
  2747. logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token.decode("utf-8")!r}')
  2748. tokens[token_id] = token
  2749. scores[token_id] = -1000.0
  2750. toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
  2751. if foken_data.get("special"):
  2752. toktypes[token_id] = SentencePieceTokenTypes.CONTROL
  2753. tokenizer_file = self.dir_model / 'tokenizer.json'
  2754. if tokenizer_file.is_file():
  2755. with open(tokenizer_file, "r", encoding="utf-8") as f:
  2756. tokenizer_json = json.load(f)
  2757. added_tokens = tokenizer_json.get("added_tokens", [])
  2758. for foken_data in added_tokens:
  2759. token_id = int(foken_data["id"])
  2760. token = foken_data["content"]
  2761. if token == chat_eos_token:
  2762. chat_eos_token_id = token_id
  2763. token = token.encode("utf-8")
  2764. if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
  2765. if tokens[token_id] != token:
  2766. logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token.decode("utf-8")!r}')
  2767. tokens[token_id] = token
  2768. scores[token_id] = -1000.0
  2769. toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
  2770. if foken_data.get("special"):
  2771. toktypes[token_id] = SentencePieceTokenTypes.CONTROL
  2772. self.gguf_writer.add_tokenizer_model("llama")
  2773. self.gguf_writer.add_tokenizer_pre("default")
  2774. self.gguf_writer.add_token_list(tokens)
  2775. self.gguf_writer.add_token_scores(scores)
  2776. self.gguf_writer.add_token_types(toktypes)
  2777. self.gguf_writer.add_add_space_prefix(add_prefix)
  2778. special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
  2779. old_eos = special_vocab.special_token_ids["eos"]
  2780. if chat_eos_token_id is not None:
  2781. # For the chat model, we replace the eos with '<|im_end|>'.
  2782. # TODO: this is a hack, should be fixed
  2783. # https://github.com/ggml-org/llama.cpp/pull/6745#issuecomment-2067687048
  2784. special_vocab.special_token_ids["eos"] = chat_eos_token_id
  2785. logger.warning(f"Replace eos:{old_eos} with a special token:{chat_eos_token_id}"
  2786. " in chat mode so that the conversation can end normally.")
  2787. special_vocab.add_to_gguf(self.gguf_writer)
  2788. def set_gguf_parameters(self):
  2789. self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"])
  2790. self.gguf_writer.add_block_count(self.hparams["num_hidden_layers"])
  2791. self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
  2792. self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
  2793. self.gguf_writer.add_rope_freq_base(self.hparams["rope_theta"])
  2794. self.gguf_writer.add_head_count(self.hparams["num_attention_heads"])
  2795. self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
  2796. self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"])
  2797. self.gguf_writer.add_file_type(self.ftype)
  2798. rope_scaling = self.hparams.get("rope_scaling") or {}
  2799. if rope_scaling.get("rope_type", rope_scaling.get("type")) == "linear" and "factor" in rope_scaling:
  2800. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
  2801. self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
  2802. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  2803. num_heads = self.hparams["num_attention_heads"]
  2804. num_kv_heads = self.hparams["num_key_value_heads"]
  2805. n_embd = self.hparams["hidden_size"]
  2806. q_per_kv = num_heads // num_kv_heads
  2807. head_dim = n_embd // num_heads
  2808. num_groups = num_heads // q_per_kv
  2809. name = name.replace("language_model.", "") # InternVL
  2810. if name.startswith("mlp") or name.startswith("vision_model"):
  2811. # skip visual tensors
  2812. return []
  2813. if bid is not None and f"model.layers.{bid}.attention.wqkv" in name:
  2814. qkv = data_torch
  2815. qkv = qkv.reshape((num_groups, q_per_kv + 2, head_dim, n_embd))
  2816. q, k, v = qkv[:, : q_per_kv], qkv[:, -2], qkv[:, -1]
  2817. # The model weights of q and k equire additional reshape.
  2818. q = LlamaModel.permute(q.reshape((-1, q.shape[-1])), num_heads, num_heads)
  2819. k = LlamaModel.permute(k.reshape((-1, k.shape[-1])), num_heads, num_kv_heads)
  2820. v = v.reshape((-1, v.shape[-1]))
  2821. return [
  2822. (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid), q),
  2823. (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid), k),
  2824. (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid), v),
  2825. ]
  2826. else:
  2827. return [(self.map_tensor_name(name), data_torch)]
  2828. @ModelBase.register("InternLM3ForCausalLM")
  2829. class InternLM3Model(TextModel):
  2830. model_arch = gguf.MODEL_ARCH.LLAMA
  2831. def set_vocab(self):
  2832. tokens, scores, toktypes = self._create_vocab_sentencepiece()
  2833. self.gguf_writer.add_tokenizer_model("llama")
  2834. self.gguf_writer.add_tokenizer_pre("default")
  2835. self.gguf_writer.add_token_list(tokens)
  2836. self.gguf_writer.add_token_scores(scores)
  2837. self.gguf_writer.add_token_types(toktypes)
  2838. special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
  2839. tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
  2840. if tokenizer_config_file.is_file():
  2841. with open(tokenizer_config_file, "r", encoding="utf-8") as f:
  2842. tokenizer_config_json = json.load(f)
  2843. if "add_prefix_space" in tokenizer_config_json:
  2844. self.gguf_writer.add_add_space_prefix(tokenizer_config_json["add_prefix_space"])
  2845. if "added_tokens_decoder" in tokenizer_config_json:
  2846. for token_id, token_data in tokenizer_config_json["added_tokens_decoder"].items():
  2847. if token_data.get("special"):
  2848. token_id = int(token_id)
  2849. token = token_data["content"]
  2850. special_vocab._set_special_token(token, token_id)
  2851. # update eos token
  2852. if token == '<|im_end|>' and "eos" in special_vocab.special_token_ids:
  2853. special_vocab.special_token_ids["eos"] = token_id
  2854. special_vocab.add_to_gguf(self.gguf_writer)
  2855. def set_gguf_parameters(self):
  2856. super().set_gguf_parameters()
  2857. hparams = self.hparams
  2858. self.gguf_writer.add_vocab_size(hparams["vocab_size"])
  2859. if "head_dim" in hparams:
  2860. rope_dim = hparams["head_dim"]
  2861. else:
  2862. rope_dim = hparams["hidden_size"] // hparams["num_attention_heads"]
  2863. self.gguf_writer.add_rope_dimension_count(rope_dim)
  2864. rope_scaling = self.hparams.get("rope_scaling") or {}
  2865. if rope_scaling.get("rope_type", rope_scaling.get("type")) == "linear" and "factor" in rope_scaling:
  2866. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
  2867. self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
  2868. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  2869. n_head = self.hparams["num_attention_heads"]
  2870. n_kv_head = self.hparams.get("num_key_value_heads")
  2871. name = name.replace("language_model.", "") # InternVL
  2872. if name.startswith("mlp") or name.startswith("vision_model"):
  2873. # skip visual tensors
  2874. return []
  2875. if name.endswith(("q_proj.weight", "q_proj.bias")):
  2876. data_torch = LlamaModel.permute(data_torch, n_head, n_head)
  2877. if name.endswith(("k_proj.weight", "k_proj.bias")):
  2878. data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)
  2879. return [(self.map_tensor_name(name), data_torch)]
  2880. @ModelBase.register("BertModel", "BertForMaskedLM", "CamembertModel")
  2881. class BertModel(TextModel):
  2882. model_arch = gguf.MODEL_ARCH.BERT
  2883. def __init__(self, *args, **kwargs):
  2884. super().__init__(*args, **kwargs)
  2885. self.vocab_size = None
  2886. def set_gguf_parameters(self):
  2887. super().set_gguf_parameters()
  2888. self.gguf_writer.add_causal_attention(False)
  2889. self._try_set_pooling_type()
  2890. def set_vocab(self):
  2891. tokens, toktypes, tokpre = self.get_vocab_base()
  2892. self.vocab_size = len(tokens)
  2893. # we need this to validate the size of the token_type embeddings
  2894. # though currently we are passing all zeros to the token_type embeddings
  2895. # "Sequence A" or "Sequence B"
  2896. self.gguf_writer.add_token_type_count(self.hparams.get("type_vocab_size", 1))
  2897. # convert to phantom space vocab
  2898. def phantom(tok):
  2899. if tok.startswith("[") and tok.endswith("]"):
  2900. return tok
  2901. if tok.startswith("##"):
  2902. return tok[2:]
  2903. return "\u2581" + tok
  2904. tokens = list(map(phantom, tokens))
  2905. # add vocab to gguf
  2906. self.gguf_writer.add_tokenizer_model("bert")
  2907. self.gguf_writer.add_tokenizer_pre(tokpre)
  2908. self.gguf_writer.add_token_list(tokens)
  2909. self.gguf_writer.add_token_types(toktypes)
  2910. # handle special tokens
  2911. special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
  2912. special_vocab.add_to_gguf(self.gguf_writer)
  2913. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  2914. del bid # unused
  2915. if name.startswith("bert."):
  2916. name = name[5:]
  2917. if name.endswith(".gamma"):
  2918. name = name[:-6] + ".weight"
  2919. if name.endswith(".beta"):
  2920. name = name[:-5] + ".bias"
  2921. # we are only using BERT for embeddings so we don't need the pooling layer
  2922. if name in ("embeddings.position_ids", "pooler.dense.weight", "pooler.dense.bias"):
  2923. return [] # we don't need these
  2924. if name.startswith("cls.predictions"):
  2925. return []
  2926. if name.startswith("cls.seq_relationship"):
  2927. return []
  2928. return [(self.map_tensor_name(name), data_torch)]
  2929. def _xlmroberta_tokenizer_init(self) -> None:
  2930. # we need the pad_token_id to know how to chop down position_embd matrix
  2931. if (pad_token_id := self.hparams.get("pad_token_id")) is not None:
  2932. self._position_offset = 1 + pad_token_id
  2933. if "max_position_embeddings" in self.hparams:
  2934. self.hparams["max_position_embeddings"] -= self._position_offset
  2935. else:
  2936. self._position_offset = None
  2937. def _xlmroberta_set_vocab(self) -> None:
  2938. # to avoid TypeError: Descriptors cannot be created directly
  2939. # exception when importing sentencepiece_model_pb2
  2940. os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
  2941. from sentencepiece import SentencePieceProcessor
  2942. from sentencepiece import sentencepiece_model_pb2 as model
  2943. tokenizer_path = self.dir_model / 'sentencepiece.bpe.model'
  2944. if not tokenizer_path.is_file():
  2945. raise FileNotFoundError(f"File not found: {tokenizer_path}")
  2946. sentencepiece_model = model.ModelProto() # pyright: ignore[reportAttributeAccessIssue]
  2947. sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read())
  2948. assert sentencepiece_model.trainer_spec.model_type == 1 # UNIGRAM
  2949. add_prefix = sentencepiece_model.normalizer_spec.add_dummy_prefix
  2950. remove_whitespaces = sentencepiece_model.normalizer_spec.remove_extra_whitespaces
  2951. precompiled_charsmap = sentencepiece_model.normalizer_spec.precompiled_charsmap
  2952. tokenizer = SentencePieceProcessor()
  2953. tokenizer.LoadFromFile(str(tokenizer_path))
  2954. vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())
  2955. tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
  2956. scores: list[float] = [-10000.0] * vocab_size
  2957. toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size
  2958. for token_id in range(tokenizer.vocab_size()):
  2959. piece = tokenizer.IdToPiece(token_id)
  2960. text = piece.encode("utf-8")
  2961. score = tokenizer.GetScore(token_id)
  2962. toktype = SentencePieceTokenTypes.NORMAL
  2963. if tokenizer.IsUnknown(token_id):
  2964. toktype = SentencePieceTokenTypes.UNKNOWN
  2965. elif tokenizer.IsControl(token_id):
  2966. toktype = SentencePieceTokenTypes.CONTROL
  2967. elif tokenizer.IsUnused(token_id):
  2968. toktype = SentencePieceTokenTypes.UNUSED
  2969. elif tokenizer.IsByte(token_id):
  2970. toktype = SentencePieceTokenTypes.BYTE
  2971. tokens[token_id] = text
  2972. scores[token_id] = score
  2973. toktypes[token_id] = toktype
  2974. if vocab_size > len(tokens):
  2975. pad_count = vocab_size - len(tokens)
  2976. logger.debug(f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]")
  2977. for i in range(1, pad_count + 1):
  2978. tokens.append(bytes(f"[PAD{i}]", encoding="utf-8"))
  2979. scores.append(-1000.0)
  2980. toktypes.append(SentencePieceTokenTypes.UNUSED)
  2981. # realign tokens (see HF tokenizer code)
  2982. tokens = [b'<s>', b'<pad>', b'</s>', b'<unk>'] + tokens[3:-1]
  2983. scores = [0.0, 0.0, 0.0, 0.0] + scores[3:-1]
  2984. toktypes = [
  2985. SentencePieceTokenTypes.CONTROL,
  2986. SentencePieceTokenTypes.CONTROL,
  2987. SentencePieceTokenTypes.CONTROL,
  2988. SentencePieceTokenTypes.UNKNOWN,
  2989. ] + toktypes[3:-1]
  2990. self.gguf_writer.add_tokenizer_model("t5")
  2991. self.gguf_writer.add_tokenizer_pre("default")
  2992. self.gguf_writer.add_token_list(tokens)
  2993. self.gguf_writer.add_token_scores(scores)
  2994. self.gguf_writer.add_token_types(toktypes)
  2995. self.gguf_writer.add_add_space_prefix(add_prefix)
  2996. self.gguf_writer.add_token_type_count(self.hparams.get("type_vocab_size", 1))
  2997. self.gguf_writer.add_remove_extra_whitespaces(remove_whitespaces)
  2998. if precompiled_charsmap:
  2999. self.gguf_writer.add_precompiled_charsmap(precompiled_charsmap)
  3000. special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
  3001. special_vocab.add_to_gguf(self.gguf_writer)
  3002. self.gguf_writer.add_add_bos_token(True)
  3003. self.gguf_writer.add_add_eos_token(True)
  3004. @ModelBase.register("RobertaModel")
  3005. class RobertaModel(BertModel):
  3006. model_arch = gguf.MODEL_ARCH.BERT
  3007. def __init__(self, *args, **kwargs):
  3008. super().__init__(*args, **kwargs)
  3009. # we need the pad_token_id to know how to chop down position_embd matrix
  3010. if (pad_token_id := self.hparams.get("pad_token_id")) is not None:
  3011. self._position_offset = 1 + pad_token_id
  3012. if "max_position_embeddings" in self.hparams:
  3013. self.hparams["max_position_embeddings"] -= self._position_offset
  3014. else:
  3015. self._position_offset = None
  3016. def set_vocab(self):
  3017. """Support BPE tokenizers for roberta models"""
  3018. bpe_tok_path = self.dir_model / "tokenizer.json"
  3019. if bpe_tok_path.exists():
  3020. self._set_vocab_gpt2()
  3021. self.gguf_writer.add_add_bos_token(True)
  3022. self.gguf_writer.add_add_eos_token(True)
  3023. # we need this to validate the size of the token_type embeddings
  3024. # though currently we are passing all zeros to the token_type embeddings
  3025. # "Sequence A" or "Sequence B"
  3026. self.gguf_writer.add_token_type_count(self.hparams.get("type_vocab_size", 1))
  3027. else:
  3028. return super().set_vocab()
  3029. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  3030. # if name starts with "roberta.", remove the prefix
  3031. # e.g. https://huggingface.co/BAAI/bge-reranker-v2-m3/tree/main
  3032. if name.startswith("roberta."):
  3033. name = name[8:]
  3034. # position embeddings start at pad_token_id + 1, so just chop down the weight tensor
  3035. if name == "embeddings.position_embeddings.weight":
  3036. if self._position_offset is not None:
  3037. data_torch = data_torch[self._position_offset:,:]
  3038. return super().modify_tensors(data_torch, name, bid)
  3039. @ModelBase.register("NomicBertModel")
  3040. class NomicBertModel(BertModel):
  3041. model_arch = gguf.MODEL_ARCH.BERT
  3042. def __init__(self, dir_model: Path, ftype: gguf.LlamaFileType, fname_out: Path, **kwargs: Any):
  3043. hparams = kwargs.pop("hparams", None)
  3044. if hparams is None:
  3045. hparams = ModelBase.load_hparams(dir_model)
  3046. self.is_moe = bool(hparams.get("moe_every_n_layers"))
  3047. self.model_arch = gguf.MODEL_ARCH.NOMIC_BERT_MOE if self.is_moe else gguf.MODEL_ARCH.NOMIC_BERT
  3048. super().__init__(dir_model, ftype, fname_out, hparams=hparams, **kwargs)
  3049. self._tokenizer_is_xlmroberta = self._is_tokenizer_xlmroberta()
  3050. if self._tokenizer_is_xlmroberta:
  3051. self._xlmroberta_tokenizer_init()
  3052. npos, mtp = self.hparams["n_positions"], self.hparams.get("max_trained_positions", 2048)
  3053. if npos == 8192 and mtp == 2048:
  3054. self.hparams["n_positions"] = 2048 # nomic-embed-text v1 and v1.5 are trained for 2048 tokens.
  3055. elif npos == 2048 and mtp == 2048:
  3056. self.hparams["n_positions"] = 512 # nomic-embed-text-v2-moe is trained for 512 tokens.
  3057. else:
  3058. raise ValueError(f"unrecognized parameters: n_positions={npos}, max_trained_positions={mtp}")
  3059. assert self.hparams["activation_function"] == "gelu" if self.is_moe else "swiglu"
  3060. # this doesn't do anything in the HF version
  3061. assert self.hparams["causal"] is False
  3062. # no bias tensors unless MoE
  3063. assert self.hparams["qkv_proj_bias"] == self.is_moe
  3064. assert self.hparams["mlp_fc1_bias"] == self.is_moe
  3065. assert self.hparams["mlp_fc2_bias"] == self.is_moe
  3066. # norm at end of layer
  3067. assert self.hparams["prenorm"] is False
  3068. # standard RoPE
  3069. assert self.hparams["rotary_emb_fraction"] == 1.0
  3070. assert self.hparams["rotary_emb_interleaved"] is False
  3071. assert self.hparams["rotary_emb_scale_base"] is None
  3072. def set_vocab(self) -> None:
  3073. if self._tokenizer_is_xlmroberta:
  3074. return self._xlmroberta_set_vocab()
  3075. return super().set_vocab()
  3076. def modify_tensors(self, data_torch: torch.Tensor, name: str, bid: int | None) -> Iterable[tuple[str, torch.Tensor]]:
  3077. # If the tensor is an experts bias tensor, skip it by returning an empty list.
  3078. if "mlp.experts.bias" in name:
  3079. return [] # Explicitly return an empty list.
  3080. if "mlp.experts.mlp.w1" in name:
  3081. data_torch = data_torch.view(self.hparams["num_experts"], self.hparams["n_inner"], self.hparams["n_embd"])
  3082. name += ".weight"
  3083. if "mlp.experts.mlp.w2" in name:
  3084. data_torch = data_torch.view(self.hparams["num_experts"], self.hparams["n_inner"], self.hparams["n_embd"])
  3085. data_torch = data_torch.transpose(1, 2)
  3086. name += ".weight"
  3087. return [(self.map_tensor_name(name), data_torch)]
  3088. def set_gguf_parameters(self):
  3089. super().set_gguf_parameters()
  3090. self.gguf_writer.add_rope_freq_base(self.hparams["rotary_emb_base"])
  3091. if self.is_moe:
  3092. self.gguf_writer.add_moe_every_n_layers(self.hparams["moe_every_n_layers"])
  3093. self.gguf_writer.add_expert_count(self.hparams["num_experts"])
  3094. self.gguf_writer.add_expert_used_count(self.hparams["moe_top_k"])
  3095. def _is_tokenizer_xlmroberta(self) -> bool:
  3096. with open(self.dir_model / "tokenizer.json") as f:
  3097. tokenizer_json = json.load(f)
  3098. toktyp = tokenizer_json["model"]["type"]
  3099. if toktyp == "Unigram":
  3100. return True
  3101. if toktyp == "WordPiece":
  3102. return False
  3103. raise ValueError(f"unknown tokenizer: {toktyp}")
  3104. @ModelBase.register("XLMRobertaModel", "XLMRobertaForSequenceClassification")
  3105. class XLMRobertaModel(BertModel):
  3106. model_arch = gguf.MODEL_ARCH.BERT
  3107. def __init__(self, *args, **kwargs):
  3108. super().__init__(*args, **kwargs)
  3109. self._xlmroberta_tokenizer_init()
  3110. def set_vocab(self):
  3111. self._xlmroberta_set_vocab()
  3112. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  3113. # if name starts with "roberta.", remove the prefix
  3114. # e.g. https://huggingface.co/BAAI/bge-reranker-v2-m3/tree/main
  3115. if name.startswith("roberta."):
  3116. name = name[8:]
  3117. # position embeddings start at pad_token_id + 1, so just chop down the weight tensor
  3118. if name == "embeddings.position_embeddings.weight":
  3119. if self._position_offset is not None:
  3120. data_torch = data_torch[self._position_offset:,:]
  3121. return super().modify_tensors(data_torch, name, bid)
  3122. @ModelBase.register("GemmaForCausalLM")
  3123. class GemmaModel(TextModel):
  3124. model_arch = gguf.MODEL_ARCH.GEMMA
  3125. def set_vocab(self):
  3126. self._set_vocab_sentencepiece()
  3127. # TODO: these special tokens should be exported only for the CodeGemma family
  3128. special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=False,
  3129. special_token_types = ['prefix', 'suffix', 'middle', 'fsep', 'eot'])
  3130. special_vocab._set_special_token("prefix", 67)
  3131. special_vocab._set_special_token("suffix", 69)
  3132. special_vocab._set_special_token("middle", 68)
  3133. special_vocab._set_special_token("fsep", 70)
  3134. special_vocab._set_special_token("eot", 107)
  3135. special_vocab.chat_template = None # do not add it twice
  3136. special_vocab.add_to_gguf(self.gguf_writer)
  3137. self.gguf_writer.add_add_space_prefix(False)
  3138. def set_gguf_parameters(self):
  3139. hparams = self.hparams
  3140. block_count = hparams["num_hidden_layers"]
  3141. self.gguf_writer.add_context_length(hparams["max_position_embeddings"])
  3142. self.gguf_writer.add_embedding_length(hparams["hidden_size"])
  3143. self.gguf_writer.add_block_count(block_count)
  3144. self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
  3145. self.gguf_writer.add_head_count(hparams["num_attention_heads"])
  3146. self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"] if "num_key_value_heads" in hparams else hparams["num_attention_heads"])
  3147. self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
  3148. self.gguf_writer.add_key_length(hparams["head_dim"])
  3149. self.gguf_writer.add_value_length(hparams["head_dim"])
  3150. self.gguf_writer.add_file_type(self.ftype)
  3151. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  3152. del bid # unused
  3153. # lm_head is not used in llama.cpp, while autoawq will include this tensor in model
  3154. # To prevent errors, skip loading lm_head.weight.
  3155. if name == "lm_head.weight":
  3156. logger.debug(f"Skipping get tensor {name!r} in safetensors so that convert can end normally.")
  3157. return []
  3158. # ref: https://github.com/huggingface/transformers/blob/fc37f38915372c15992b540dfcbbe00a916d4fc6/src/transformers/models/gemma/modeling_gemma.py#L89
  3159. if name.endswith("norm.weight"):
  3160. data_torch = data_torch + 1
  3161. return [(self.map_tensor_name(name), data_torch)]
  3162. @ModelBase.register("Gemma2ForCausalLM")
  3163. class Gemma2Model(TextModel):
  3164. model_arch = gguf.MODEL_ARCH.GEMMA2
  3165. def set_vocab(self):
  3166. self._set_vocab_sentencepiece()
  3167. self.gguf_writer.add_add_space_prefix(False)
  3168. def set_gguf_parameters(self):
  3169. hparams = self.hparams
  3170. block_count = hparams["num_hidden_layers"]
  3171. self.gguf_writer.add_context_length(hparams["max_position_embeddings"])
  3172. self.gguf_writer.add_embedding_length(hparams["hidden_size"])
  3173. self.gguf_writer.add_block_count(block_count)
  3174. self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
  3175. self.gguf_writer.add_head_count(hparams["num_attention_heads"])
  3176. self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"] if "num_key_value_heads" in hparams else hparams["num_attention_heads"])
  3177. self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
  3178. self.gguf_writer.add_key_length(hparams["head_dim"])
  3179. self.gguf_writer.add_value_length(hparams["head_dim"])
  3180. self.gguf_writer.add_file_type(self.ftype)
  3181. self.gguf_writer.add_attn_logit_softcapping(
  3182. self.hparams["attn_logit_softcapping"]
  3183. )
  3184. self.gguf_writer.add_final_logit_softcapping(
  3185. self.hparams["final_logit_softcapping"]
  3186. )
  3187. self.gguf_writer.add_sliding_window(self.hparams["sliding_window"])
  3188. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  3189. del bid # unused
  3190. # lm_head is not used in llama.cpp, while autoawq will include this tensor in model
  3191. # To prevent errors, skip loading lm_head.weight.
  3192. if name == "lm_head.weight":
  3193. logger.debug(f"Skipping get tensor {name!r} in safetensors so that convert can end normally.")
  3194. return []
  3195. # ref: https://github.com/huggingface/transformers/blob/fc37f38915372c15992b540dfcbbe00a916d4fc6/src/transformers/models/gemma/modeling_gemma.py#L89
  3196. if name.endswith("norm.weight"):
  3197. data_torch = data_torch + 1
  3198. return [(self.map_tensor_name(name), data_torch)]
  3199. @ModelBase.register("Gemma3ForCausalLM", "Gemma3ForConditionalGeneration")
  3200. class Gemma3Model(TextModel):
  3201. model_arch = gguf.MODEL_ARCH.GEMMA3
  3202. def set_vocab(self):
  3203. self._set_vocab_sentencepiece()
  3204. self.gguf_writer.add_add_space_prefix(False)
  3205. def set_gguf_parameters(self):
  3206. hparams = self.hparams
  3207. block_count = hparams["num_hidden_layers"]
  3208. # some default values are not specified in the hparams
  3209. self.gguf_writer.add_context_length(hparams.get("max_position_embeddings", 131072))
  3210. self.gguf_writer.add_embedding_length(hparams["hidden_size"])
  3211. self.gguf_writer.add_block_count(block_count)
  3212. self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
  3213. self.gguf_writer.add_head_count(hparams.get("num_attention_heads", 8))
  3214. self.gguf_writer.add_layer_norm_rms_eps(self.hparams.get("rms_norm_eps", 1e-6))
  3215. self.gguf_writer.add_key_length(hparams.get("head_dim", 256))
  3216. self.gguf_writer.add_value_length(hparams.get("head_dim", 256))
  3217. self.gguf_writer.add_file_type(self.ftype)
  3218. self.gguf_writer.add_rope_freq_base(hparams.get("rope_theta", 1_000_000.0)) # for global layers
  3219. # both attn_logit_softcapping and final_logit_softcapping are removed in Gemma3
  3220. assert hparams.get("attn_logit_softcapping") is None
  3221. assert hparams.get("final_logit_softcapping") is None
  3222. self.gguf_writer.add_sliding_window(hparams["sliding_window"])
  3223. self.gguf_writer.add_head_count_kv(hparams.get("num_key_value_heads", 4))
  3224. if hparams.get("rope_scaling") is not None:
  3225. assert hparams["rope_scaling"]["rope_type"] == "linear"
  3226. # important: this rope_scaling is only applied for global layers, and not used by 1B model
  3227. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
  3228. self.gguf_writer.add_rope_scaling_factor(hparams["rope_scaling"]["factor"])
  3229. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  3230. del bid # unused
  3231. if name.startswith("language_model."):
  3232. name = name.replace("language_model.", "")
  3233. elif name.startswith("multi_modal_projector.") or name.startswith("vision_tower.") \
  3234. or name.startswith("multimodal_projector.") or name.startswith("vision_model."):
  3235. return [] # skip vision tensors
  3236. # remove OOV (out-of-vocabulary) rows in token_embd
  3237. if "embed_tokens.weight" in name:
  3238. vocab = self._create_vocab_sentencepiece()
  3239. tokens = vocab[0]
  3240. data_torch = data_torch[:len(tokens)]
  3241. # ref code in Gemma3RMSNorm
  3242. # output = output * (1.0 + self.weight.float())
  3243. if name.endswith("norm.weight"):
  3244. data_torch = data_torch + 1
  3245. return [(self.map_tensor_name(name), data_torch)]
  3246. @ModelBase.register("Gemma3ForConditionalGeneration")
  3247. class Gemma3VisionModel(VisionModel):
  3248. def set_gguf_parameters(self):
  3249. super().set_gguf_parameters()
  3250. hparams = self.hparams
  3251. self.gguf_writer.add_vision_projector_type(gguf.VisionProjectorType.GEMMA3)
  3252. # default values below are taken from HF tranformers code
  3253. self.gguf_writer.add_vision_attention_layernorm_eps(hparams.get("layer_norm_eps", 1e-6))
  3254. self.gguf_writer.add_vision_use_gelu(True)
  3255. # calculate proj_scale_factor (used by tinygemma3 test model)
  3256. image_seq_length = self.preprocessor_config.get("image_seq_length", 256)
  3257. n_per_side = int(image_seq_length ** 0.5)
  3258. image_size = self.hparams["image_size"]
  3259. patch_size = self.hparams["patch_size"]
  3260. proj_scale_factor = (image_size // patch_size) // n_per_side
  3261. if proj_scale_factor > 0 and proj_scale_factor != 4:
  3262. # we only need to write this if it's not the default value
  3263. # in this case, we are converting a test model
  3264. self.gguf_writer.add_vision_projector_scale_factor(proj_scale_factor)
  3265. def tensor_force_quant(self, name, new_name, bid, n_dims):
  3266. del bid, new_name, n_dims # unused
  3267. # related to https://github.com/ggml-org/llama.cpp/issues/13025
  3268. if "input_projection" in name:
  3269. return gguf.GGMLQuantizationType.F16
  3270. if ".embeddings." in name:
  3271. return gguf.GGMLQuantizationType.F32
  3272. return False
  3273. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  3274. del bid # unused
  3275. if "vision_model.head." in name:
  3276. return [] # skip redundant tensors for tinygemma3
  3277. if name.startswith("multi_modal_projector.") or name.startswith("vision_tower.") \
  3278. or name.startswith("multimodal_projector.") or name.startswith("vision_model."):
  3279. # process vision tensors
  3280. name = name.replace("_weight", ".weight")
  3281. # correct norm value ; only this "soft_emb_norm" need to be corrected as it's part of Gemma projector
  3282. # the other norm values are part of SigLIP model, and they are already correct
  3283. # ref code: Gemma3RMSNorm
  3284. if "soft_emb_norm.weight" in name:
  3285. logger.info(f"Correcting norm value for '{name}'")
  3286. data_torch = data_torch + 1
  3287. return [(self.map_tensor_name(name), data_torch)]
  3288. return [] # skip other tensors
  3289. @ModelBase.register("Starcoder2ForCausalLM")
  3290. class StarCoder2Model(TextModel):
  3291. model_arch = gguf.MODEL_ARCH.STARCODER2
  3292. @ModelBase.register("Rwkv6ForCausalLM")
  3293. class Rwkv6Model(TextModel):
  3294. model_arch = gguf.MODEL_ARCH.RWKV6
  3295. def set_vocab(self):
  3296. self._set_vocab_rwkv_world()
  3297. def set_gguf_parameters(self):
  3298. block_count = self.hparams["num_hidden_layers"]
  3299. head_size = self.hparams["head_size"]
  3300. hidden_size = self.hparams["hidden_size"]
  3301. layer_norm_eps = self.hparams["layer_norm_epsilon"]
  3302. rescale_every_n_layers = self.hparams["rescale_every"]
  3303. intermediate_size = self.hparams["intermediate_size"] if self.hparams["intermediate_size"] is not None else int((hidden_size * 3.5) // 32 * 32)
  3304. time_mix_extra_dim = 64 if hidden_size == 4096 else 32
  3305. time_decay_extra_dim = 128 if hidden_size == 4096 else 64
  3306. # RWKV isn't context limited
  3307. self.gguf_writer.add_context_length(1048576)
  3308. self.gguf_writer.add_embedding_length(hidden_size)
  3309. self.gguf_writer.add_block_count(block_count)
  3310. self.gguf_writer.add_layer_norm_eps(layer_norm_eps)
  3311. self.gguf_writer.add_rescale_every_n_layers(rescale_every_n_layers)
  3312. self.gguf_writer.add_wkv_head_size(head_size)
  3313. self.gguf_writer.add_time_mix_extra_dim(time_mix_extra_dim)
  3314. self.gguf_writer.add_time_decay_extra_dim(time_decay_extra_dim)
  3315. self.gguf_writer.add_feed_forward_length(intermediate_size)
  3316. self.gguf_writer.add_file_type(self.ftype)
  3317. # required by llama.cpp, unused
  3318. self.gguf_writer.add_head_count(0)
  3319. lerp_weights: dict[int, dict[str, Tensor]] = {}
  3320. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  3321. new_name = self.map_tensor_name(name)
  3322. if not (new_name.endswith(".weight") or new_name.endswith(".bias")):
  3323. new_name += ".weight"
  3324. if new_name.endswith("time_mix_w1.weight") or new_name.endswith("time_mix_decay_w1.weight") or new_name.endswith("time_mix_decay_w2.weight"):
  3325. data_torch = data_torch.transpose(0, 1)
  3326. if new_name.endswith("time_mix_w2.weight"):
  3327. data_torch = data_torch.permute(0, 2, 1)
  3328. if new_name.endswith("time_mix_decay.weight") or "lerp" in new_name:
  3329. data_torch = data_torch.squeeze()
  3330. try:
  3331. rescale_every_n_layers = self.hparams["rescale_every"]
  3332. if rescale_every_n_layers > 0:
  3333. if new_name.endswith("time_mix_output.weight") or new_name.endswith("channel_mix_value.weight"):
  3334. data_torch = data_torch.div_(2 ** int(bid // rescale_every_n_layers))
  3335. except KeyError:
  3336. pass
  3337. # concat time_mix_lerp weights to reduce some cpu overhead
  3338. # also reduces the number of tensors in the model
  3339. if bid is not None and "time_mix_lerp" in new_name and "time_mix_lerp_x" not in new_name:
  3340. try:
  3341. self.lerp_weights[bid][new_name] = data_torch
  3342. except KeyError:
  3343. self.lerp_weights[bid] = {new_name: data_torch}
  3344. if all(f"blk.{bid}.time_mix_lerp_{i}.weight" in self.lerp_weights[bid].keys() for i in ["w", "k", "v", "r", "g"]):
  3345. new_name = f"blk.{bid}.time_mix_lerp_fused.weight"
  3346. data = torch.stack([self.lerp_weights[bid][f"blk.{bid}.time_mix_lerp_{i}.weight"].unsqueeze(0) for i in ["w", "k", "v", "r", "g"]], dim=0).unsqueeze(1)
  3347. yield (new_name, data)
  3348. return
  3349. yield (new_name, data_torch)
  3350. @ModelBase.register("RWKV6Qwen2ForCausalLM")
  3351. class RWKV6Qwen2Model(Rwkv6Model):
  3352. model_arch = gguf.MODEL_ARCH.RWKV6QWEN2
  3353. def set_vocab(self):
  3354. try:
  3355. self._set_vocab_sentencepiece()
  3356. except FileNotFoundError:
  3357. self._set_vocab_gpt2()
  3358. def set_gguf_parameters(self):
  3359. block_count = self.hparams["num_hidden_layers"]
  3360. num_attention_heads = self.hparams["num_attention_heads"]
  3361. num_key_value_heads = self.hparams["num_key_value_heads"]
  3362. hidden_size = self.hparams["hidden_size"]
  3363. head_size = hidden_size // num_attention_heads
  3364. rms_norm_eps = self.hparams["rms_norm_eps"]
  3365. intermediate_size = self.hparams["intermediate_size"]
  3366. time_mix_extra_dim = self.hparams.get("lora_rank_tokenshift", 64 if hidden_size >= 4096 else 32)
  3367. time_decay_extra_dim = self.hparams.get("lora_rank_decay", 128 if hidden_size >= 4096 else 64)
  3368. # RWKV isn't context limited
  3369. self.gguf_writer.add_context_length(1048576)
  3370. self.gguf_writer.add_embedding_length(hidden_size)
  3371. self.gguf_writer.add_block_count(block_count)
  3372. self.gguf_writer.add_wkv_head_size(head_size)
  3373. self.gguf_writer.add_time_mix_extra_dim(time_mix_extra_dim)
  3374. self.gguf_writer.add_time_decay_extra_dim(time_decay_extra_dim)
  3375. self.gguf_writer.add_feed_forward_length(intermediate_size)
  3376. self.gguf_writer.add_file_type(self.ftype)
  3377. # special parameters for time_mixing in RWKV6QWEN2
  3378. self.gguf_writer.add_layer_norm_rms_eps(rms_norm_eps)
  3379. self.gguf_writer.add_token_shift_count(1)
  3380. # RWKV6QWEN2 use grouped key/value like GQA
  3381. self.gguf_writer.add_head_count_kv(num_key_value_heads)
  3382. # required by llama.cpp, unused
  3383. self.gguf_writer.add_head_count(0)
  3384. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  3385. for new_name, data in super().modify_tensors(data_torch, name, bid):
  3386. if "time_mix_w1" in new_name or "time_mix_w2" in new_name:
  3387. data = data.view(5, -1, data.shape[-1])
  3388. # rwkv6qwen2 has a different order of rkvwg instead of the original wkvrg
  3389. # permute them here to avoid code changes
  3390. data = torch.stack([data[3], data[1], data[2], data[0], data[4]], dim=0).view(-1, data.shape[-1])
  3391. if "w2" in new_name:
  3392. data = data.view(5, -1, data.shape[-1])
  3393. yield (new_name, data)
  3394. continue
  3395. yield (new_name, data)
  3396. @ModelBase.register("Rwkv7ForCausalLM", "RWKV7ForCausalLM")
  3397. class Rwkv7Model(TextModel):
  3398. model_arch = gguf.MODEL_ARCH.RWKV7
  3399. def set_vocab(self):
  3400. self._set_vocab_rwkv_world()
  3401. def calc_lora_rank(self, hidden_size, exponent, multiplier):
  3402. return max(1, round(hidden_size ** exponent * multiplier / 32)) * 32
  3403. def set_gguf_parameters(self):
  3404. block_count = self.hparams["num_hidden_layers"]
  3405. try:
  3406. head_size = self.hparams["head_size"]
  3407. layer_norm_eps = self.hparams["layer_norm_epsilon"]
  3408. except KeyError:
  3409. head_size = self.hparams["head_dim"]
  3410. layer_norm_eps = self.hparams["norm_eps"]
  3411. hidden_size = self.hparams["hidden_size"]
  3412. intermediate_size = self.hparams["intermediate_size"] if self.hparams["intermediate_size"] is not None else (hidden_size * 4)
  3413. # ICLR: In-Context-Learning-Rate
  3414. try:
  3415. lora_rank_decay = self.hparams["lora_rank_decay"] if self.hparams["lora_rank_decay"] is not None else self.calc_lora_rank(hidden_size, 0.5, 1.8)
  3416. lora_rank_iclr = self.hparams["lora_rank_iclr"] if self.hparams["lora_rank_iclr"] is not None else self.calc_lora_rank(hidden_size, 0.5, 1.8)
  3417. lora_rank_value_residual_mix = self.hparams["lora_rank_value_residual_mix"] if self.hparams["lora_rank_value_residual_mix"] is not None else self.calc_lora_rank(hidden_size, 0.5, 1.3)
  3418. lora_rank_gate = self.hparams["lora_rank_gate"] if self.hparams["lora_rank_gate"] is not None else self.calc_lora_rank(hidden_size, 0.8, 0.6)
  3419. except KeyError:
  3420. lora_rank_decay = self.hparams["decay_low_rank_dim"] if self.hparams["decay_low_rank_dim"] is not None else self.calc_lora_rank(hidden_size, 0.5, 1.8)
  3421. lora_rank_iclr = self.hparams["a_low_rank_dim"] if self.hparams["a_low_rank_dim"] is not None else self.calc_lora_rank(hidden_size, 0.5, 1.8)
  3422. lora_rank_value_residual_mix = self.hparams["v_low_rank_dim"] if self.hparams["v_low_rank_dim"] is not None else self.calc_lora_rank(hidden_size, 0.5, 1.3)
  3423. lora_rank_gate = self.hparams["gate_low_rank_dim"] if self.hparams["gate_low_rank_dim"] is not None else self.calc_lora_rank(hidden_size, 0.8, 0.6)
  3424. # RWKV isn't context limited
  3425. self.gguf_writer.add_context_length(1048576)
  3426. self.gguf_writer.add_embedding_length(hidden_size)
  3427. self.gguf_writer.add_block_count(block_count)
  3428. self.gguf_writer.add_layer_norm_eps(layer_norm_eps)
  3429. self.gguf_writer.add_wkv_head_size(head_size)
  3430. self.gguf_writer.add_decay_lora_rank(lora_rank_decay)
  3431. self.gguf_writer.add_iclr_lora_rank(lora_rank_iclr)
  3432. self.gguf_writer.add_value_residual_mix_lora_rank(lora_rank_value_residual_mix)
  3433. self.gguf_writer.add_gate_lora_rank(lora_rank_gate)
  3434. self.gguf_writer.add_feed_forward_length(intermediate_size)
  3435. self.gguf_writer.add_file_type(self.ftype)
  3436. # required by llama.cpp, unused
  3437. self.gguf_writer.add_head_count(0)
  3438. lerp_weights: dict[int, dict[str, Tensor]] = {}
  3439. lora_needs_transpose: bool = True
  3440. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  3441. # unify tensor names here to make life easier
  3442. name = name.replace("blocks", "layers").replace("ffn", "feed_forward")
  3443. name = name.replace("self_attn", "attention").replace("attn", "attention")
  3444. name = name.replace("time_mixer.", "")
  3445. # lora layer names in fla-hub's impl
  3446. if "_lora.lora" in name:
  3447. self.lora_needs_transpose = False
  3448. name = name.replace("_lora.lora.0.weight", "1.weight")
  3449. name = name.replace("_lora.lora.2.weight", "2.weight")
  3450. name = name.replace("_lora.lora.2.bias", "0.weight")
  3451. name = name.replace("feed_forward_norm", "ln2")
  3452. name = name.replace("g_norm", "ln_x")
  3453. if "attention.v" in name and "value" not in self.map_tensor_name(name) and bid == 0:
  3454. # some models have dummy v0/v1/v2 on first layer while others don't
  3455. # ignore them all since they are not used
  3456. return
  3457. wkv_has_gate = self.hparams.get("wkv_has_gate", True)
  3458. lerp_list = ["r", "w", "k", "v", "a", "g"] if wkv_has_gate else ["r", "w", "k", "v", "a"]
  3459. if bid is not None and "attention.x_" in name:
  3460. if "attention.x_x" in name:
  3461. # already concatenated
  3462. new_name = f"blk.{bid}.time_mix_lerp_fused.weight"
  3463. data = data_torch.reshape(len(lerp_list), 1, 1, -1)
  3464. yield (new_name, data)
  3465. else:
  3466. try:
  3467. self.lerp_weights[bid][name] = data_torch
  3468. except KeyError:
  3469. self.lerp_weights[bid] = {name: data_torch}
  3470. if all(f"model.layers.{bid}.attention.x_{i}" in self.lerp_weights[bid].keys() for i in lerp_list):
  3471. new_name = f"blk.{bid}.time_mix_lerp_fused.weight"
  3472. data = torch.stack([self.lerp_weights[bid][f"model.layers.{bid}.attention.x_{i}"] for i in lerp_list], dim=0)
  3473. yield (new_name, data)
  3474. return
  3475. else:
  3476. data_torch = data_torch.squeeze()
  3477. new_name = self.map_tensor_name(name)
  3478. if not (new_name.endswith(".weight") or new_name.endswith(".bias")):
  3479. new_name += ".weight"
  3480. if self.lora_needs_transpose and any(
  3481. new_name.endswith(t) for t in [
  3482. "time_mix_w1.weight", "time_mix_w2.weight",
  3483. "time_mix_a1.weight", "time_mix_a2.weight",
  3484. "time_mix_v1.weight", "time_mix_v2.weight",
  3485. "time_mix_g1.weight", "time_mix_g2.weight",
  3486. ]
  3487. ):
  3488. data_torch = data_torch.transpose(0, 1)
  3489. if 'r_k' in new_name:
  3490. data_torch = data_torch.flatten()
  3491. if bid == 0 and "time_mix_a" in new_name:
  3492. # dummy v0/v1/v2 on first layer
  3493. # easist way to make llama happy
  3494. yield (new_name.replace("time_mix_a", "time_mix_v"), data_torch)
  3495. yield (new_name, data_torch)
  3496. @ModelBase.register("RwkvHybridForCausalLM")
  3497. class ARwkv7Model(Rwkv7Model):
  3498. model_arch = gguf.MODEL_ARCH.ARWKV7
  3499. def set_vocab(self):
  3500. try:
  3501. self._set_vocab_sentencepiece()
  3502. except FileNotFoundError:
  3503. self._set_vocab_gpt2()
  3504. def set_gguf_parameters(self):
  3505. block_count = self.hparams["num_hidden_layers"]
  3506. hidden_size = self.hparams["hidden_size"]
  3507. head_size = self.hparams["head_size"]
  3508. rms_norm_eps = self.hparams["rms_norm_eps"]
  3509. intermediate_size = self.hparams["intermediate_size"]
  3510. wkv_has_gate = self.hparams["wkv_has_gate"]
  3511. assert self.hparams["wkv_version"] == 7
  3512. # ICLR: In-Context-Learning-Rate
  3513. lora_rank_decay = 64
  3514. lora_rank_iclr = 64
  3515. lora_rank_value_residual_mix = 32
  3516. lora_rank_gate = 128 if wkv_has_gate else 0
  3517. # RWKV isn't context limited
  3518. self.gguf_writer.add_context_length(1048576)
  3519. self.gguf_writer.add_embedding_length(hidden_size)
  3520. self.gguf_writer.add_block_count(block_count)
  3521. self.gguf_writer.add_layer_norm_rms_eps(rms_norm_eps)
  3522. self.gguf_writer.add_wkv_head_size(head_size)
  3523. self.gguf_writer.add_decay_lora_rank(lora_rank_decay)
  3524. self.gguf_writer.add_iclr_lora_rank(lora_rank_iclr)
  3525. self.gguf_writer.add_value_residual_mix_lora_rank(lora_rank_value_residual_mix)
  3526. self.gguf_writer.add_gate_lora_rank(lora_rank_gate)
  3527. self.gguf_writer.add_feed_forward_length(intermediate_size)
  3528. self.gguf_writer.add_file_type(self.ftype)
  3529. self.gguf_writer.add_token_shift_count(1)
  3530. # required by llama.cpp, unused
  3531. self.gguf_writer.add_head_count(0)
  3532. @ModelBase.register("MambaForCausalLM", "MambaLMHeadModel", "FalconMambaForCausalLM")
  3533. class MambaModel(TextModel):
  3534. model_arch = gguf.MODEL_ARCH.MAMBA
  3535. def set_vocab(self):
  3536. vocab_size = self.hparams["vocab_size"]
  3537. # Round vocab size to next multiple of 8
  3538. pad_vocab = self.hparams.get("pad_vocab_size_multiple", 8)
  3539. # pad using ceiling division
  3540. # ref: https://stackoverflow.com/a/17511341/22827863
  3541. vocab_size = -(vocab_size // -pad_vocab) * pad_vocab
  3542. self.hparams["vocab_size"] = vocab_size
  3543. if (self.dir_model / "tokenizer.json").is_file():
  3544. self._set_vocab_gpt2()
  3545. elif (self.dir_model / "tokenizer.model").is_file():
  3546. self._set_vocab_sentencepiece()
  3547. else:
  3548. # Use the GPT-NeoX tokenizer when no tokenizer files are present
  3549. self._set_vocab_builtin("gpt-neox", vocab_size)
  3550. def set_gguf_parameters(self):
  3551. d_model = self.find_hparam(["hidden_size", "d_model"])
  3552. d_conv = self.find_hparam(["conv_kernel", "d_conv"], optional=True) or 4
  3553. d_inner = self.find_hparam(["intermediate_size", "d_inner"], optional=True) or 2 * d_model
  3554. d_state = self.find_hparam(["state_size", "d_state"], optional=True) or 16
  3555. # ceiling division
  3556. # ref: https://stackoverflow.com/a/17511341/22827863
  3557. # ref: https://github.com/state-spaces/mamba/blob/ce59daea3a090d011d6476c6e5b97f6d58ddad8b/mamba_ssm/modules/mamba_simple.py#L58
  3558. dt_rank = self.find_hparam(["time_step_rank", "dt_rank"], optional=True) or -(d_model // -16)
  3559. rms_norm_eps = self.find_hparam(["layer_norm_epsilon", "rms_norm_eps"], optional=True) or 1e-5
  3560. use_dt_b_c_norm = False
  3561. # For falconmamba we do apply RMS norm on B / DT and C layers
  3562. if self.find_hparam(["model_type"], optional=True) in ("falcon_mamba",):
  3563. use_dt_b_c_norm = True
  3564. # Fail early for models which don't have a block expansion factor of 2
  3565. assert d_inner == 2 * d_model
  3566. self.gguf_writer.add_context_length(2**20) # arbitrary value; for those who use the default
  3567. self.gguf_writer.add_embedding_length(d_model)
  3568. self.gguf_writer.add_feed_forward_length(0) # unused, but seemingly required when loading
  3569. self.gguf_writer.add_head_count(0) # unused, but seemingly required when loading
  3570. self.gguf_writer.add_block_count(self.block_count)
  3571. self.gguf_writer.add_ssm_conv_kernel(d_conv)
  3572. self.gguf_writer.add_ssm_inner_size(d_inner)
  3573. self.gguf_writer.add_ssm_state_size(d_state)
  3574. self.gguf_writer.add_ssm_time_step_rank(dt_rank)
  3575. self.gguf_writer.add_layer_norm_rms_eps(rms_norm_eps)
  3576. self.gguf_writer.add_ssm_dt_b_c_rms(use_dt_b_c_norm) # For classic Mamba we don't apply rms norm on B / DT layers
  3577. self.gguf_writer.add_file_type(self.ftype)
  3578. _tok_embd = None
  3579. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  3580. output_name = self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT)
  3581. tok_embd_name = self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD)
  3582. new_name = self.map_tensor_name(name)
  3583. if name.endswith(".A_log"):
  3584. logger.debug("A_log --> A ==> " + new_name)
  3585. data_torch = -torch.exp(data_torch)
  3586. # [4 1 8192 1] -> [4 8192 1 1]
  3587. if self.match_model_tensor_name(new_name, gguf.MODEL_TENSOR.SSM_CONV1D, bid):
  3588. data_torch = data_torch.squeeze()
  3589. # assuming token_embd.weight is seen before output.weight
  3590. if self._tok_embd is not None and new_name == output_name:
  3591. if torch.equal(self._tok_embd, data_torch):
  3592. logger.debug(f"{output_name} is equivalent to {tok_embd_name}, omitting")
  3593. return []
  3594. elif new_name == tok_embd_name:
  3595. self._tok_embd = data_torch
  3596. return [(new_name, data_torch)]
  3597. @ModelBase.register("CohereForCausalLM")
  3598. class CommandR2Model(TextModel):
  3599. model_arch = gguf.MODEL_ARCH.COMMAND_R
  3600. def __init__(self, *args, **kwargs):
  3601. super().__init__(*args, **kwargs)
  3602. # max_position_embeddings = 8192 in config.json but model was actually
  3603. # trained on 128k context length
  3604. # aya-23 models don't have model_max_length specified
  3605. self.hparams["max_position_embeddings"] = self.find_hparam(["model_max_length", "max_position_embeddings"])
  3606. def set_gguf_parameters(self):
  3607. super().set_gguf_parameters()
  3608. self.gguf_writer.add_logit_scale(self.hparams["logit_scale"])
  3609. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)
  3610. @ModelBase.register("Cohere2ForCausalLM")
  3611. class Cohere2Model(TextModel):
  3612. model_arch = gguf.MODEL_ARCH.COHERE2
  3613. def set_gguf_parameters(self):
  3614. super().set_gguf_parameters()
  3615. self.gguf_writer.add_logit_scale(self.hparams["logit_scale"])
  3616. self.gguf_writer.add_sliding_window(self.hparams["sliding_window"])
  3617. self.gguf_writer.add_vocab_size(self.hparams["vocab_size"])
  3618. rotary_pct = self.hparams["rotary_pct"]
  3619. hidden_size = self.hparams["hidden_size"]
  3620. num_attention_heads = self.hparams["num_attention_heads"]
  3621. self.gguf_writer.add_rope_dimension_count(int(rotary_pct * (hidden_size // num_attention_heads)))
  3622. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)
  3623. @ModelBase.register("OlmoForCausalLM")
  3624. @ModelBase.register("OLMoForCausalLM")
  3625. class OlmoModel(TextModel):
  3626. model_arch = gguf.MODEL_ARCH.OLMO
  3627. def set_gguf_parameters(self):
  3628. super().set_gguf_parameters()
  3629. self.gguf_writer.add_layer_norm_eps(1e-5)
  3630. clip_qkv = self.hparams.get("clip_qkv")
  3631. if clip_qkv is not None:
  3632. self.gguf_writer.add_clamp_kqv(clip_qkv)
  3633. # Same as super class, but permuting q_proj, k_proj
  3634. # Copied from: LlamaModel
  3635. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  3636. del bid # unused
  3637. n_head = self.hparams["num_attention_heads"]
  3638. n_kv_head = self.hparams.get("num_key_value_heads")
  3639. if name.endswith("q_proj.weight"):
  3640. data_torch = LlamaModel.permute(data_torch, n_head, n_head)
  3641. if name.endswith("k_proj.weight"):
  3642. data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)
  3643. return [(self.map_tensor_name(name), data_torch)]
  3644. @ModelBase.register("Olmo2ForCausalLM")
  3645. class Olmo2Model(TextModel):
  3646. model_arch = gguf.MODEL_ARCH.OLMO2
  3647. @ModelBase.register("OlmoeForCausalLM")
  3648. class OlmoeModel(TextModel):
  3649. model_arch = gguf.MODEL_ARCH.OLMOE
  3650. def set_gguf_parameters(self):
  3651. super().set_gguf_parameters()
  3652. self.gguf_writer.add_layer_norm_rms_eps(1e-5)
  3653. if (n_experts := self.hparams.get("num_experts")) is not None:
  3654. self.gguf_writer.add_expert_count(n_experts)
  3655. _experts: list[dict[str, Tensor]] | None = None
  3656. # Copied from: Qwen2MoeModel
  3657. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  3658. # process the experts separately
  3659. if name.find("experts") != -1:
  3660. n_experts = self.hparams["num_experts"]
  3661. assert bid is not None
  3662. if self._experts is None:
  3663. self._experts = [{} for _ in range(self.block_count)]
  3664. self._experts[bid][name] = data_torch
  3665. if len(self._experts[bid]) >= n_experts * 3:
  3666. tensors: list[tuple[str, Tensor]] = []
  3667. # merge the experts into a single 3d tensor
  3668. for w_name in ["down_proj", "gate_proj", "up_proj"]:
  3669. datas: list[Tensor] = []
  3670. for xid in range(n_experts):
  3671. ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
  3672. datas.append(self._experts[bid][ename])
  3673. del self._experts[bid][ename]
  3674. data_torch = torch.stack(datas, dim=0)
  3675. merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"
  3676. new_name = self.map_tensor_name(merged_name)
  3677. tensors.append((new_name, data_torch))
  3678. return tensors
  3679. else:
  3680. return []
  3681. return [(self.map_tensor_name(name), data_torch)]
  3682. # Copied from: Qwen2MoeModel
  3683. def prepare_tensors(self):
  3684. super().prepare_tensors()
  3685. if self._experts is not None:
  3686. # flatten `list[dict[str, Tensor]]` into `list[str]`
  3687. experts = [k for d in self._experts for k in d.keys()]
  3688. if len(experts) > 0:
  3689. raise ValueError(f"Unprocessed experts: {experts}")
  3690. @ModelBase.register("JinaBertModel", "JinaBertForMaskedLM")
  3691. class JinaBertV2Model(BertModel):
  3692. model_arch = gguf.MODEL_ARCH.JINA_BERT_V2
  3693. def __init__(self, *args, **kwargs):
  3694. super().__init__(*args, **kwargs)
  3695. self.intermediate_size = self.hparams["intermediate_size"]
  3696. def get_tensors(self):
  3697. for name, data in super().get_tensors():
  3698. if 'gated_layer' in name:
  3699. d1 = data[:self.intermediate_size, :]
  3700. name1 = name.replace('gated_layers', 'gated_layers_w')
  3701. name1 = name1.replace('up_gated_layer', 'gated_layers_v')
  3702. d2 = data[self.intermediate_size:, :]
  3703. name2 = name.replace('gated_layers', 'gated_layers_v')
  3704. name2 = name2.replace('up_gated_layer', 'gated_layers_w')
  3705. yield name1, d1
  3706. yield name2, d2
  3707. continue
  3708. yield name, data
  3709. def set_vocab(self):
  3710. tokenizer_class = 'BertTokenizer'
  3711. with open(self.dir_model / "tokenizer_config.json", "r", encoding="utf-8") as f:
  3712. tokenizer_class = json.load(f)['tokenizer_class']
  3713. if tokenizer_class == 'BertTokenizer':
  3714. super().set_vocab()
  3715. elif tokenizer_class == 'RobertaTokenizer':
  3716. self._set_vocab_gpt2()
  3717. self.gguf_writer.add_token_type_count(2)
  3718. else:
  3719. raise NotImplementedError(f'Tokenizer {tokenizer_class} is not supported for JinaBertModel')
  3720. self.gguf_writer.add_add_bos_token(True)
  3721. self.gguf_writer.add_add_eos_token(True)
  3722. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  3723. # if name starts with "bert.", remove the prefix
  3724. # e.g. https://huggingface.co/jinaai/jina-reranker-v1-tiny-en
  3725. if name.startswith("bert."):
  3726. name = name[5:]
  3727. return super().modify_tensors(data_torch, name, bid)
  3728. @ModelBase.register("OpenELMForCausalLM")
  3729. class OpenELMModel(TextModel):
  3730. model_arch = gguf.MODEL_ARCH.OPENELM
  3731. @staticmethod
  3732. def _make_divisible(v: float | int, divisor: int) -> int:
  3733. # ref: https://huggingface.co/apple/OpenELM-270M-Instruct/blob/eb111ff2e6724348e5b905984063d4064d4bc579/configuration_openelm.py#L34-L38
  3734. new_v = max(divisor, int(v + divisor / 2) // divisor * divisor)
  3735. # Make sure that round down does not go down by more than 10%.
  3736. if new_v < 0.9 * v:
  3737. new_v += divisor
  3738. return new_v
  3739. def __init__(self, *args, **kwargs):
  3740. super().__init__(*args, **kwargs)
  3741. ffn_multipliers: list[float] = self.hparams["ffn_multipliers"]
  3742. ffn_dim_divisor: int = self.hparams["ffn_dim_divisor"]
  3743. self._n_embd: int = self.hparams["model_dim"]
  3744. self._num_kv_heads: list[int] = self.hparams["num_kv_heads"]
  3745. self._num_query_heads: list[int] = self.hparams["num_query_heads"]
  3746. self._ffn_dims: list[int] = [
  3747. OpenELMModel._make_divisible(multiplier * self._n_embd, ffn_dim_divisor)
  3748. for multiplier in ffn_multipliers
  3749. ]
  3750. assert isinstance(self._num_kv_heads, list) and isinstance(self._num_kv_heads[0], int)
  3751. assert isinstance(self._num_query_heads, list) and isinstance(self._num_query_heads[0], int)
  3752. # Uses the tokenizer from meta-llama/Llama-2-7b-hf
  3753. def set_vocab(self):
  3754. try:
  3755. self._set_vocab_sentencepiece()
  3756. except FileNotFoundError:
  3757. self._set_vocab_builtin("llama-spm", self.hparams["vocab_size"])
  3758. def set_gguf_parameters(self):
  3759. n_embd = self._n_embd
  3760. head_dim = self.hparams["head_dim"]
  3761. rot_pct = 1.0
  3762. assert self.block_count == len(self._num_kv_heads)
  3763. assert self.block_count == len(self._num_query_heads)
  3764. assert self.block_count == len(self._ffn_dims)
  3765. self.gguf_writer.add_block_count(self.block_count)
  3766. self.gguf_writer.add_context_length(self.hparams["max_context_length"])
  3767. self.gguf_writer.add_embedding_length(n_embd)
  3768. self.gguf_writer.add_feed_forward_length(self._ffn_dims)
  3769. self.gguf_writer.add_head_count(self._num_query_heads)
  3770. self.gguf_writer.add_head_count_kv(self._num_kv_heads)
  3771. self.gguf_writer.add_rope_freq_base(self.hparams["rope_freq_constant"])
  3772. # https://huggingface.co/apple/OpenELM-270M-Instruct/blob/c401df2/modeling_openelm.py#L30
  3773. self.gguf_writer.add_layer_norm_rms_eps(1e-6)
  3774. self.gguf_writer.add_rope_dimension_count(int(rot_pct * head_dim))
  3775. self.gguf_writer.add_key_length(head_dim)
  3776. self.gguf_writer.add_value_length(head_dim)
  3777. self.gguf_writer.add_file_type(self.ftype)
  3778. def find_hparam(self, keys: Iterable[str], optional: bool = False) -> Any:
  3779. if "n_layers" in keys:
  3780. return self.hparams["num_transformer_layers"]
  3781. return super().find_hparam(keys, optional)
  3782. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  3783. # split ff
  3784. if bid is not None and name == f"transformer.layers.{bid}.ffn.proj_1.weight":
  3785. ff_dim = self._ffn_dims[bid]
  3786. yield (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE, bid), data_torch[:ff_dim])
  3787. yield (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP, bid), data_torch[ff_dim:])
  3788. return
  3789. yield (self.map_tensor_name(name), data_torch)
  3790. @ModelBase.register("ArcticForCausalLM")
  3791. class ArcticModel(TextModel):
  3792. model_arch = gguf.MODEL_ARCH.ARCTIC
  3793. def set_vocab(self):
  3794. # The reason for using a custom implementation here is that the
  3795. # snowflake-arctic-instruct model redefined tokens 31998 and 31999 from
  3796. # tokenizer.model and used them as BOS and EOS instead of adding new tokens.
  3797. from sentencepiece import SentencePieceProcessor
  3798. tokenizer_path = self.dir_model / 'tokenizer.model'
  3799. if not tokenizer_path.is_file():
  3800. logger.error(f'Error: Missing {tokenizer_path}')
  3801. sys.exit(1)
  3802. # Read the whole vocabulary from the tokenizer.model file
  3803. tokenizer = SentencePieceProcessor()
  3804. tokenizer.LoadFromFile(str(tokenizer_path))
  3805. vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())
  3806. tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
  3807. scores: list[float] = [-10000.0] * vocab_size
  3808. toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size
  3809. for token_id in range(tokenizer.vocab_size()):
  3810. piece = tokenizer.IdToPiece(token_id)
  3811. text = piece.encode("utf-8")
  3812. score = tokenizer.GetScore(token_id)
  3813. toktype = SentencePieceTokenTypes.NORMAL
  3814. if tokenizer.IsUnknown(token_id):
  3815. toktype = SentencePieceTokenTypes.UNKNOWN
  3816. elif tokenizer.IsControl(token_id):
  3817. toktype = SentencePieceTokenTypes.CONTROL
  3818. elif tokenizer.IsUnused(token_id):
  3819. toktype = SentencePieceTokenTypes.UNUSED
  3820. elif tokenizer.IsByte(token_id):
  3821. toktype = SentencePieceTokenTypes.BYTE
  3822. tokens[token_id] = text
  3823. scores[token_id] = score
  3824. toktypes[token_id] = toktype
  3825. # Use the added_tokens_decoder field from tokeniser_config.json as the source
  3826. # of information about added/redefined tokens and modify them accordingly.
  3827. tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
  3828. if tokenizer_config_file.is_file():
  3829. with open(tokenizer_config_file, "r", encoding="utf-8") as f:
  3830. tokenizer_config_json = json.load(f)
  3831. if "added_tokens_decoder" in tokenizer_config_json:
  3832. added_tokens_decoder = tokenizer_config_json["added_tokens_decoder"]
  3833. for token_id, token_json in added_tokens_decoder.items():
  3834. token_id = int(token_id)
  3835. if token_id >= vocab_size:
  3836. logger.debug(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
  3837. continue
  3838. token_content = token_json["content"]
  3839. token_type = SentencePieceTokenTypes.USER_DEFINED
  3840. token_score = -10000.0
  3841. # Map unk_token to UNKNOWN, other special tokens to CONTROL
  3842. # Set the score to 0.0 as in the original tokenizer.model
  3843. if ("special" in token_json) and token_json["special"]:
  3844. if token_content == tokenizer_config_json["unk_token"]:
  3845. token_type = SentencePieceTokenTypes.UNKNOWN
  3846. else:
  3847. token_type = SentencePieceTokenTypes.CONTROL
  3848. token_score = 0.0
  3849. logger.info(f"Setting added token {token_id} to '{token_content}' (type: {token_type}, score: {token_score:.2f})")
  3850. tokens[token_id] = token_content.encode("utf-8")
  3851. toktypes[token_id] = token_type
  3852. scores[token_id] = token_score
  3853. self.gguf_writer.add_tokenizer_model("llama")
  3854. self.gguf_writer.add_tokenizer_pre("default")
  3855. self.gguf_writer.add_token_list(tokens)
  3856. self.gguf_writer.add_token_scores(scores)
  3857. self.gguf_writer.add_token_types(toktypes)
  3858. special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
  3859. special_vocab.add_to_gguf(self.gguf_writer)
  3860. def set_gguf_parameters(self):
  3861. super().set_gguf_parameters()
  3862. hparams = self.hparams
  3863. self.gguf_writer.add_vocab_size(hparams["vocab_size"])
  3864. self.gguf_writer.add_rope_dimension_count(hparams["hidden_size"] // hparams["num_attention_heads"])
  3865. _experts: list[dict[str, Tensor]] | None = None
  3866. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  3867. n_head = self.hparams["num_attention_heads"]
  3868. n_kv_head = self.hparams.get("num_key_value_heads")
  3869. if name.endswith("q_proj.weight"):
  3870. data_torch = LlamaModel.permute(data_torch, n_head, n_head)
  3871. if name.endswith("k_proj.weight"):
  3872. data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)
  3873. # process the experts separately
  3874. if name.find("block_sparse_moe.experts") != -1:
  3875. n_experts = self.hparams["num_local_experts"]
  3876. assert bid is not None
  3877. if self._experts is None:
  3878. self._experts = [{} for _ in range(self.block_count)]
  3879. self._experts[bid][name] = data_torch
  3880. if len(self._experts[bid]) >= n_experts * 3:
  3881. tensors: list[tuple[str, Tensor]] = []
  3882. # merge the experts into a single 3d tensor
  3883. for wid in ["w1", "w2", "w3"]:
  3884. datas: list[Tensor] = []
  3885. for xid in range(n_experts):
  3886. ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.{wid}.weight"
  3887. datas.append(self._experts[bid][ename])
  3888. del self._experts[bid][ename]
  3889. data_torch = torch.stack(datas, dim=0)
  3890. merged_name = f"layers.{bid}.feed_forward.experts.{wid}.weight"
  3891. new_name = self.map_tensor_name(merged_name)
  3892. tensors.append((new_name, data_torch))
  3893. return tensors
  3894. else:
  3895. return []
  3896. return [(self.map_tensor_name(name), data_torch)]
  3897. def prepare_tensors(self):
  3898. super().prepare_tensors()
  3899. if self._experts is not None:
  3900. # flatten `list[dict[str, Tensor]]` into `list[str]`
  3901. experts = [k for d in self._experts for k in d.keys()]
  3902. if len(experts) > 0:
  3903. raise ValueError(f"Unprocessed experts: {experts}")
  3904. @ModelBase.register("DeepseekForCausalLM")
  3905. class DeepseekModel(TextModel):
  3906. model_arch = gguf.MODEL_ARCH.DEEPSEEK
  3907. def set_vocab(self):
  3908. try:
  3909. self._set_vocab_sentencepiece()
  3910. except FileNotFoundError:
  3911. self._set_vocab_gpt2()
  3912. def set_gguf_parameters(self):
  3913. super().set_gguf_parameters()
  3914. hparams = self.hparams
  3915. if "head_dim" in hparams:
  3916. rope_dim = hparams["head_dim"]
  3917. else:
  3918. rope_dim = hparams["hidden_size"] // hparams["num_attention_heads"]
  3919. self.gguf_writer.add_rope_dimension_count(rope_dim)
  3920. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)
  3921. self.gguf_writer.add_leading_dense_block_count(hparams["first_k_dense_replace"])
  3922. self.gguf_writer.add_vocab_size(hparams["vocab_size"])
  3923. self.gguf_writer.add_expert_feed_forward_length(hparams["moe_intermediate_size"])
  3924. self.gguf_writer.add_expert_weights_scale(1.0)
  3925. self.gguf_writer.add_expert_count(hparams["n_routed_experts"])
  3926. self.gguf_writer.add_expert_shared_count(hparams["n_shared_experts"])
  3927. _experts: list[dict[str, Tensor]] | None = None
  3928. @staticmethod
  3929. def permute(weights: Tensor, n_head: int, n_head_kv: int | None):
  3930. if n_head_kv is not None and n_head != n_head_kv:
  3931. n_head = n_head_kv
  3932. return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
  3933. .swapaxes(1, 2)
  3934. .reshape(weights.shape))
  3935. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  3936. n_head = self.hparams["num_attention_heads"]
  3937. n_kv_head = self.hparams.get("num_key_value_heads")
  3938. if name.endswith(("q_proj.weight", "q_proj.bias")):
  3939. data_torch = DeepseekModel.permute(data_torch, n_head, n_head)
  3940. if name.endswith(("k_proj.weight", "k_proj.bias")):
  3941. data_torch = DeepseekModel.permute(data_torch, n_head, n_kv_head)
  3942. # process the experts separately
  3943. if name.find("mlp.experts") != -1:
  3944. n_experts = self.hparams["n_routed_experts"]
  3945. assert bid is not None
  3946. if self._experts is None:
  3947. self._experts = [{} for _ in range(self.block_count)]
  3948. self._experts[bid][name] = data_torch
  3949. if len(self._experts[bid]) >= n_experts * 3:
  3950. tensors: list[tuple[str, Tensor]] = []
  3951. # merge the experts into a single 3d tensor
  3952. for w_name in ["down_proj", "gate_proj", "up_proj"]:
  3953. datas: list[Tensor] = []
  3954. for xid in range(n_experts):
  3955. ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
  3956. datas.append(self._experts[bid][ename])
  3957. del self._experts[bid][ename]
  3958. data_torch = torch.stack(datas, dim=0)
  3959. merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"
  3960. new_name = self.map_tensor_name(merged_name)
  3961. tensors.append((new_name, data_torch))
  3962. return tensors
  3963. else:
  3964. return []
  3965. return [(self.map_tensor_name(name), data_torch)]
  3966. def prepare_tensors(self):
  3967. super().prepare_tensors()
  3968. if self._experts is not None:
  3969. # flatten `list[dict[str, Tensor]]` into `list[str]`
  3970. experts = [k for d in self._experts for k in d.keys()]
  3971. if len(experts) > 0:
  3972. raise ValueError(f"Unprocessed experts: {experts}")
  3973. @ModelBase.register("DeepseekV2ForCausalLM")
  3974. @ModelBase.register("DeepseekV3ForCausalLM")
  3975. class DeepseekV2Model(TextModel):
  3976. model_arch = gguf.MODEL_ARCH.DEEPSEEK2
  3977. def set_vocab(self):
  3978. self._set_vocab_gpt2()
  3979. def set_gguf_parameters(self):
  3980. # note: deepseek2 using MLA converts into MQA (ie: GQA with 1 group)
  3981. self.hparams["num_key_value_heads"] = 1
  3982. super().set_gguf_parameters()
  3983. hparams = self.hparams
  3984. self.gguf_writer.add_leading_dense_block_count(hparams["first_k_dense_replace"])
  3985. self.gguf_writer.add_vocab_size(hparams["vocab_size"])
  3986. if "q_lora_rank" in hparams and hparams["q_lora_rank"] is not None:
  3987. self.gguf_writer.add_q_lora_rank(hparams["q_lora_rank"])
  3988. self.gguf_writer.add_kv_lora_rank(hparams["kv_lora_rank"])
  3989. # note: deepseek2 using MLA converts into MQA with larger heads, then decompresses to MHA
  3990. self.gguf_writer.add_key_length(hparams["kv_lora_rank"] + hparams["qk_rope_head_dim"])
  3991. self.gguf_writer.add_value_length(hparams["kv_lora_rank"])
  3992. self.gguf_writer.add_key_length_mla(hparams["qk_nope_head_dim"] + hparams["qk_rope_head_dim"])
  3993. self.gguf_writer.add_value_length_mla(hparams["v_head_dim"])
  3994. self.gguf_writer.add_expert_feed_forward_length(hparams["moe_intermediate_size"])
  3995. self.gguf_writer.add_expert_count(hparams["n_routed_experts"])
  3996. self.gguf_writer.add_expert_shared_count(hparams["n_shared_experts"])
  3997. self.gguf_writer.add_expert_weights_scale(hparams["routed_scaling_factor"])
  3998. self.gguf_writer.add_expert_weights_norm(hparams["norm_topk_prob"])
  3999. if hparams["scoring_func"] == "sigmoid":
  4000. self.gguf_writer.add_expert_gating_func(gguf.ExpertGatingFuncType.SIGMOID)
  4001. elif hparams["scoring_func"] == "softmax":
  4002. self.gguf_writer.add_expert_gating_func(gguf.ExpertGatingFuncType.SOFTMAX)
  4003. else:
  4004. raise ValueError(f"Unsupported scoring_func value: {hparams['scoring_func']}")
  4005. self.gguf_writer.add_rope_dimension_count(hparams["qk_rope_head_dim"])
  4006. rope_scaling = self.hparams.get("rope_scaling") or {}
  4007. if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling:
  4008. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
  4009. self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
  4010. self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"])
  4011. self.gguf_writer.add_rope_scaling_yarn_log_mul(0.1 * rope_scaling["mscale_all_dim"])
  4012. _experts: list[dict[str, Tensor]] | None = None
  4013. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  4014. # rename e_score_correction_bias tensors
  4015. if name.endswith("e_score_correction_bias"):
  4016. name = name.replace("e_score_correction_bias", "e_score_correction.bias")
  4017. # skip Multi-Token Prediction (MTP) layers
  4018. block_count = self.hparams["num_hidden_layers"]
  4019. match = re.match(r"model.layers.(\d+)", name)
  4020. if match and int(match.group(1)) >= block_count:
  4021. return []
  4022. # process the experts separately
  4023. if name.find("mlp.experts") != -1:
  4024. n_experts = self.hparams["n_routed_experts"]
  4025. assert bid is not None
  4026. if self._experts is None:
  4027. self._experts = [{} for _ in range(self.block_count)]
  4028. self._experts[bid][name] = data_torch
  4029. if len(self._experts[bid]) >= n_experts * 3:
  4030. tensors: list[tuple[str, Tensor]] = []
  4031. # merge the experts into a single 3d tensor
  4032. for w_name in ["down_proj", "gate_proj", "up_proj"]:
  4033. datas: list[Tensor] = []
  4034. for xid in range(n_experts):
  4035. ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
  4036. datas.append(self._experts[bid][ename])
  4037. del self._experts[bid][ename]
  4038. data_torch = torch.stack(datas, dim=0)
  4039. merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"
  4040. new_name = self.map_tensor_name(merged_name)
  4041. tensors.append((new_name, data_torch))
  4042. return tensors
  4043. else:
  4044. return []
  4045. # note: MLA with the absorption optimization, needs these two split and k_b_proj transposed
  4046. if name.endswith("kv_b_proj.weight"):
  4047. name_kb = name.replace("kv_b_proj", "k_b_proj")
  4048. name_vb = name.replace("kv_b_proj", "v_b_proj")
  4049. n_head_kv = self.hparams["num_key_value_heads"]
  4050. v_head_dim = self.hparams["v_head_dim"]
  4051. qk_nope_head_dim = self.hparams["qk_nope_head_dim"]
  4052. assert data_torch.shape[0] == n_head_kv * (v_head_dim + qk_nope_head_dim)
  4053. kv_b = data_torch.view(n_head_kv, v_head_dim + qk_nope_head_dim, data_torch.shape[-1])
  4054. k_b, v_b = torch.split(kv_b, [qk_nope_head_dim, v_head_dim], dim=1)
  4055. k_b = k_b.transpose(1, 2)
  4056. return [
  4057. (self.map_tensor_name(name_kb), k_b),
  4058. (self.map_tensor_name(name_vb), v_b)
  4059. ]
  4060. return [(self.map_tensor_name(name), data_torch)]
  4061. def prepare_tensors(self):
  4062. super().prepare_tensors()
  4063. if self._experts is not None:
  4064. # flatten `list[dict[str, Tensor]]` into `list[str]`
  4065. experts = [k for d in self._experts for k in d.keys()]
  4066. if len(experts) > 0:
  4067. raise ValueError(f"Unprocessed experts: {experts}")
  4068. @ModelBase.register("PLMForCausalLM")
  4069. class PLMModel(TextModel):
  4070. model_arch = gguf.MODEL_ARCH.PLM
  4071. def set_vocab(self):
  4072. self._set_vocab_gpt2()
  4073. def set_gguf_parameters(self):
  4074. super().set_gguf_parameters()
  4075. hparams = self.hparams
  4076. self.gguf_writer.add_vocab_size(hparams["vocab_size"])
  4077. self.gguf_writer.add_kv_lora_rank(hparams["kv_lora_rank"])
  4078. self.gguf_writer.add_key_length(hparams["qk_nope_head_dim"] + hparams["qk_rope_head_dim"])
  4079. self.gguf_writer.add_value_length(hparams["v_head_dim"])
  4080. self.gguf_writer.add_rope_dimension_count(hparams["qk_rope_head_dim"])
  4081. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  4082. return [(self.map_tensor_name(name), data_torch)]
  4083. def prepare_tensors(self):
  4084. super().prepare_tensors()
  4085. @ModelBase.register("T5WithLMHeadModel")
  4086. @ModelBase.register("T5ForConditionalGeneration")
  4087. @ModelBase.register("MT5ForConditionalGeneration")
  4088. @ModelBase.register("UMT5ForConditionalGeneration")
  4089. class T5Model(TextModel):
  4090. model_arch = gguf.MODEL_ARCH.T5
  4091. def __init__(self, *args, **kwargs):
  4092. super().__init__(*args, **kwargs)
  4093. self.shared_token_embeddings_found = False
  4094. def set_vocab(self):
  4095. # to avoid TypeError: Descriptors cannot be created directly
  4096. # exception when importing sentencepiece_model_pb2
  4097. os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
  4098. from sentencepiece import SentencePieceProcessor
  4099. from sentencepiece import sentencepiece_model_pb2 as model
  4100. tokenizer_path = self.dir_model / 'tokenizer.model'
  4101. # many older models use spiece.model tokenizer model filename
  4102. if not tokenizer_path.is_file():
  4103. tokenizer_path = self.dir_model / 'spiece.model'
  4104. if not tokenizer_path.is_file():
  4105. raise FileNotFoundError(f"File not found: {tokenizer_path}")
  4106. sentencepiece_model = model.ModelProto() # pyright: ignore[reportAttributeAccessIssue]
  4107. sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read())
  4108. # some models like Pile-T5 family use BPE tokenizer instead of Unigram
  4109. if sentencepiece_model.trainer_spec.model_type == 2: # BPE
  4110. # assure the tokenizer model file name is correct
  4111. assert tokenizer_path.name == 'tokenizer.model'
  4112. return self._set_vocab_sentencepiece()
  4113. else:
  4114. assert sentencepiece_model.trainer_spec.model_type == 1 # UNIGRAM
  4115. add_prefix = sentencepiece_model.normalizer_spec.add_dummy_prefix
  4116. remove_whitespaces = sentencepiece_model.normalizer_spec.remove_extra_whitespaces
  4117. precompiled_charsmap = sentencepiece_model.normalizer_spec.precompiled_charsmap
  4118. tokenizer = SentencePieceProcessor()
  4119. tokenizer.LoadFromFile(str(tokenizer_path))
  4120. vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())
  4121. tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
  4122. scores: list[float] = [-10000.0] * vocab_size
  4123. toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size
  4124. for token_id in range(tokenizer.vocab_size()):
  4125. piece = tokenizer.IdToPiece(token_id)
  4126. text = piece.encode("utf-8")
  4127. score = tokenizer.GetScore(token_id)
  4128. toktype = SentencePieceTokenTypes.NORMAL
  4129. if tokenizer.IsUnknown(token_id):
  4130. toktype = SentencePieceTokenTypes.UNKNOWN
  4131. elif tokenizer.IsControl(token_id):
  4132. toktype = SentencePieceTokenTypes.CONTROL
  4133. elif tokenizer.IsUnused(token_id):
  4134. toktype = SentencePieceTokenTypes.UNUSED
  4135. elif tokenizer.IsByte(token_id):
  4136. toktype = SentencePieceTokenTypes.BYTE
  4137. tokens[token_id] = text
  4138. scores[token_id] = score
  4139. toktypes[token_id] = toktype
  4140. added_tokens_file = self.dir_model / 'added_tokens.json'
  4141. if added_tokens_file.is_file():
  4142. with open(added_tokens_file, "r", encoding="utf-8") as f:
  4143. added_tokens_json = json.load(f)
  4144. for key in added_tokens_json:
  4145. token_id = added_tokens_json[key]
  4146. if token_id >= vocab_size:
  4147. logger.warning(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
  4148. continue
  4149. tokens[token_id] = key.encode("utf-8")
  4150. scores[token_id] = -1000.0
  4151. toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
  4152. if vocab_size > len(tokens):
  4153. pad_count = vocab_size - len(tokens)
  4154. logger.debug(f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]")
  4155. for i in range(1, pad_count + 1):
  4156. tokens.append(bytes(f"[PAD{i}]", encoding="utf-8"))
  4157. scores.append(-1000.0)
  4158. toktypes.append(SentencePieceTokenTypes.UNUSED)
  4159. self.gguf_writer.add_tokenizer_model("t5")
  4160. self.gguf_writer.add_tokenizer_pre("default")
  4161. self.gguf_writer.add_token_list(tokens)
  4162. self.gguf_writer.add_token_scores(scores)
  4163. self.gguf_writer.add_token_types(toktypes)
  4164. self.gguf_writer.add_add_space_prefix(add_prefix)
  4165. self.gguf_writer.add_remove_extra_whitespaces(remove_whitespaces)
  4166. if precompiled_charsmap:
  4167. self.gguf_writer.add_precompiled_charsmap(precompiled_charsmap)
  4168. special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
  4169. special_vocab.add_to_gguf(self.gguf_writer)
  4170. self.gguf_writer.add_add_bos_token(False)
  4171. self.gguf_writer.add_add_eos_token(True)
  4172. def set_gguf_parameters(self):
  4173. if (n_ctx := self.find_hparam(["n_positions"], optional=True)) is None:
  4174. logger.warning("Couldn't find context length in config.json, assuming default value of 512")
  4175. n_ctx = 512
  4176. self.gguf_writer.add_context_length(n_ctx)
  4177. self.gguf_writer.add_embedding_length(self.hparams["d_model"])
  4178. self.gguf_writer.add_feed_forward_length(self.hparams["d_ff"])
  4179. self.gguf_writer.add_block_count(self.hparams["num_layers"])
  4180. self.gguf_writer.add_head_count(self.hparams["num_heads"])
  4181. self.gguf_writer.add_key_length(self.hparams["d_kv"])
  4182. self.gguf_writer.add_value_length(self.hparams["d_kv"])
  4183. self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
  4184. self.gguf_writer.add_relative_attn_buckets_count(self.hparams["relative_attention_num_buckets"])
  4185. self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"])
  4186. self.gguf_writer.add_decoder_start_token_id(self.hparams["decoder_start_token_id"])
  4187. self.gguf_writer.add_file_type(self.ftype)
  4188. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  4189. del bid # unused
  4190. # T5 based models contain shared token embeddings tensors saved randomly as either "encoder.embed_tokens.weight",
  4191. # "decoder.embed_tokens.weight" or "shared.weight" tensor. In some models there are even multiple of them stored
  4192. # in the safetensors files. We use the first tensor from these three as the token embeddings for both encoder
  4193. # and decoder and ignore the remaining ones.
  4194. if name in ["decoder.embed_tokens.weight", "encoder.embed_tokens.weight", "shared.weight"]:
  4195. if not self.shared_token_embeddings_found:
  4196. name = "shared.weight"
  4197. self.shared_token_embeddings_found = True
  4198. else:
  4199. logger.debug(f"Skipping shared tensor {name!r} in safetensors so that convert can end normally.")
  4200. return []
  4201. return [(self.map_tensor_name(name), data_torch)]
  4202. @ModelBase.register("T5EncoderModel")
  4203. class T5EncoderModel(TextModel):
  4204. model_arch = gguf.MODEL_ARCH.T5ENCODER
  4205. def __init__(self, *args, **kwargs):
  4206. super().__init__(*args, **kwargs)
  4207. self.shared_token_embeddings_found = False
  4208. def set_vocab(self):
  4209. # to avoid TypeError: Descriptors cannot be created directly
  4210. # exception when importing sentencepiece_model_pb2
  4211. os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
  4212. from sentencepiece import SentencePieceProcessor
  4213. from sentencepiece import sentencepiece_model_pb2 as model
  4214. tokenizer_path = self.dir_model / 'tokenizer.model'
  4215. # many older models use spiece.model tokenizer model filename
  4216. if not tokenizer_path.is_file():
  4217. tokenizer_path = self.dir_model / 'spiece.model'
  4218. if not tokenizer_path.is_file():
  4219. raise FileNotFoundError(f"File not found: {tokenizer_path}")
  4220. sentencepiece_model = model.ModelProto() # pyright: ignore[reportAttributeAccessIssue]
  4221. sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read())
  4222. # some models like Pile-T5 family use BPE tokenizer instead of Unigram
  4223. if sentencepiece_model.trainer_spec.model_type == 2: # BPE
  4224. # assure the tokenizer model file name is correct
  4225. assert tokenizer_path.name == 'tokenizer.model'
  4226. return self._set_vocab_sentencepiece()
  4227. else:
  4228. assert sentencepiece_model.trainer_spec.model_type == 1 # UNIGRAM
  4229. add_prefix = sentencepiece_model.normalizer_spec.add_dummy_prefix
  4230. remove_whitespaces = sentencepiece_model.normalizer_spec.remove_extra_whitespaces
  4231. precompiled_charsmap = sentencepiece_model.normalizer_spec.precompiled_charsmap
  4232. tokenizer = SentencePieceProcessor()
  4233. tokenizer.LoadFromFile(str(tokenizer_path))
  4234. vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())
  4235. tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
  4236. scores: list[float] = [-10000.0] * vocab_size
  4237. toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size
  4238. for token_id in range(tokenizer.vocab_size()):
  4239. piece = tokenizer.IdToPiece(token_id)
  4240. text = piece.encode("utf-8")
  4241. score = tokenizer.GetScore(token_id)
  4242. toktype = SentencePieceTokenTypes.NORMAL
  4243. if tokenizer.IsUnknown(token_id):
  4244. toktype = SentencePieceTokenTypes.UNKNOWN
  4245. elif tokenizer.IsControl(token_id):
  4246. toktype = SentencePieceTokenTypes.CONTROL
  4247. elif tokenizer.IsUnused(token_id):
  4248. toktype = SentencePieceTokenTypes.UNUSED
  4249. elif tokenizer.IsByte(token_id):
  4250. toktype = SentencePieceTokenTypes.BYTE
  4251. tokens[token_id] = text
  4252. scores[token_id] = score
  4253. toktypes[token_id] = toktype
  4254. added_tokens_file = self.dir_model / 'added_tokens.json'
  4255. if added_tokens_file.is_file():
  4256. with open(added_tokens_file, "r", encoding="utf-8") as f:
  4257. added_tokens_json = json.load(f)
  4258. for key in added_tokens_json:
  4259. token_id = added_tokens_json[key]
  4260. if token_id >= vocab_size:
  4261. logger.warning(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
  4262. continue
  4263. tokens[token_id] = key.encode("utf-8")
  4264. scores[token_id] = -1000.0
  4265. toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
  4266. if vocab_size > len(tokens):
  4267. pad_count = vocab_size - len(tokens)
  4268. logger.debug(f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]")
  4269. for i in range(1, pad_count + 1):
  4270. tokens.append(bytes(f"[PAD{i}]", encoding="utf-8"))
  4271. scores.append(-1000.0)
  4272. toktypes.append(SentencePieceTokenTypes.UNUSED)
  4273. self.gguf_writer.add_tokenizer_model("t5")
  4274. self.gguf_writer.add_tokenizer_pre("default")
  4275. self.gguf_writer.add_token_list(tokens)
  4276. self.gguf_writer.add_token_scores(scores)
  4277. self.gguf_writer.add_token_types(toktypes)
  4278. self.gguf_writer.add_add_space_prefix(add_prefix)
  4279. self.gguf_writer.add_remove_extra_whitespaces(remove_whitespaces)
  4280. if precompiled_charsmap:
  4281. self.gguf_writer.add_precompiled_charsmap(precompiled_charsmap)
  4282. special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
  4283. special_vocab.add_to_gguf(self.gguf_writer)
  4284. self.gguf_writer.add_add_bos_token(False)
  4285. self.gguf_writer.add_add_eos_token(True)
  4286. def set_gguf_parameters(self):
  4287. if (n_ctx := self.find_hparam(["n_positions"], optional=True)) is None:
  4288. logger.warning("Couldn't find context length in config.json, assuming default value of 512")
  4289. n_ctx = 512
  4290. self.gguf_writer.add_context_length(n_ctx)
  4291. self.gguf_writer.add_embedding_length(self.hparams["d_model"])
  4292. self.gguf_writer.add_feed_forward_length(self.hparams["d_ff"])
  4293. self.gguf_writer.add_block_count(self.hparams["num_layers"])
  4294. self.gguf_writer.add_head_count(self.hparams["num_heads"])
  4295. self.gguf_writer.add_key_length(self.hparams["d_kv"])
  4296. self.gguf_writer.add_value_length(self.hparams["d_kv"])
  4297. self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
  4298. self.gguf_writer.add_relative_attn_buckets_count(self.hparams["relative_attention_num_buckets"])
  4299. self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"])
  4300. self.gguf_writer.add_file_type(self.ftype)
  4301. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  4302. del bid # unused
  4303. # T5 based models contain shared token embeddings tensors saved randomly as either "encoder.embed_tokens.weight",
  4304. # "decoder.embed_tokens.weight" or "shared.weight" tensor. In some models there are even multiple of them stored
  4305. # in the safetensors files. We use the first tensor from these three as the token embeddings for both encoder
  4306. # and decoder and ignore the remaining ones.
  4307. if name in ["decoder.embed_tokens.weight", "encoder.embed_tokens.weight", "shared.weight"]:
  4308. if not self.shared_token_embeddings_found:
  4309. name = "shared.weight"
  4310. self.shared_token_embeddings_found = True
  4311. else:
  4312. logger.debug(f"Skipping shared tensor {name!r} in safetensors so that convert can end normally.")
  4313. return []
  4314. return [(self.map_tensor_name(name), data_torch)]
  4315. @ModelBase.register("JAISLMHeadModel")
  4316. class JaisModel(TextModel):
  4317. model_arch = gguf.MODEL_ARCH.JAIS
  4318. def __init__(self, *args, **kwargs):
  4319. super().__init__(*args, **kwargs)
  4320. # SwigLU activation
  4321. assert self.hparams["activation_function"] == "swiglu"
  4322. # ALiBi position embedding
  4323. assert self.hparams["position_embedding_type"] == "alibi"
  4324. # Embeddings scale
  4325. self.embeddings_scale = 1.0
  4326. if 'mup_embeddings_scale' in self.hparams:
  4327. self.embeddings_scale = self.hparams['mup_embeddings_scale']
  4328. elif 'embeddings_scale' in self.hparams:
  4329. self.embeddings_scale = self.hparams['embeddings_scale']
  4330. else:
  4331. assert False
  4332. self.width_scale = 1.0
  4333. if 'mup_output_alpha' in self.hparams:
  4334. assert 'mup_width_scale' in self.hparams
  4335. self.width_scale = self.hparams['mup_output_alpha'] * self.hparams['mup_width_scale']
  4336. elif 'width_scale' in self.hparams:
  4337. self.width_scale = self.hparams['width_scale']
  4338. else:
  4339. assert False
  4340. self.max_alibi_bias = 8.0
  4341. def set_vocab(self):
  4342. self._set_vocab_gpt2()
  4343. def set_gguf_parameters(self):
  4344. self.gguf_writer.add_block_count(self.hparams["n_layer"])
  4345. self.gguf_writer.add_context_length(self.hparams["n_positions"])
  4346. self.gguf_writer.add_embedding_length(self.hparams["n_embd"])
  4347. self.gguf_writer.add_feed_forward_length(self.hparams["n_inner"])
  4348. self.gguf_writer.add_head_count(self.hparams["n_head"])
  4349. self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
  4350. self.gguf_writer.add_file_type(self.ftype)
  4351. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  4352. del bid # unused
  4353. tensors: list[tuple[str, Tensor]] = []
  4354. # we don't need these
  4355. if name.endswith((".attn.bias")):
  4356. return tensors
  4357. if name.endswith(("relative_pe.slopes")):
  4358. # Calculate max ALiBi bias (this is the inverse of the ALiBi calculation)
  4359. # Some other models has max_alibi_bias spelled out explicitly in the hyperparams,
  4360. # but Jais's PyTorch model simply precalculates the slope values and places them
  4361. # in relative_pes.slopes
  4362. n_head_closest_log2 = 2 ** math.floor(math.log2(self.hparams["n_head"]))
  4363. first_val = float(data_torch[0].item())
  4364. self.max_alibi_bias = -round(math.log2(first_val) * n_head_closest_log2)
  4365. return tensors
  4366. if name.endswith((".c_attn.weight", ".c_proj.weight", ".c_fc.weight", ".c_fc2.weight")):
  4367. data_torch = data_torch.transpose(1, 0)
  4368. new_name = self.map_tensor_name(name)
  4369. if new_name == self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD):
  4370. tensors.append((new_name, data_torch * self.embeddings_scale))
  4371. elif new_name == self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT):
  4372. tensors.append((new_name, data_torch * self.width_scale))
  4373. else:
  4374. tensors.append((new_name, data_torch))
  4375. return tensors
  4376. def prepare_tensors(self):
  4377. super().prepare_tensors()
  4378. self.gguf_writer.add_max_alibi_bias(self.max_alibi_bias)
  4379. @ModelBase.register("Glm4ForCausalLM")
  4380. class Glm4Model(TextModel):
  4381. model_arch = gguf.MODEL_ARCH.GLM4
  4382. def set_vocab(self):
  4383. from transformers import AutoTokenizer
  4384. tokenizer = AutoTokenizer.from_pretrained(self.dir_model, trust_remote_code=True)
  4385. special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=True)
  4386. tokens, toktypes, tokpre = self.get_vocab_base()
  4387. self.gguf_writer.add_tokenizer_model("gpt2")
  4388. self.gguf_writer.add_tokenizer_pre(tokpre)
  4389. self.gguf_writer.add_token_list(tokens)
  4390. self.gguf_writer.add_token_types(toktypes)
  4391. special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=True)
  4392. special_vocab._set_special_token("eos", tokenizer.get_added_vocab()["<|endoftext|>"])
  4393. special_vocab._set_special_token("eot", tokenizer.get_added_vocab()["<|user|>"])
  4394. special_vocab._set_special_token("unk", tokenizer.get_added_vocab()["<|endoftext|>"])
  4395. special_vocab._set_special_token("bos", tokenizer.get_added_vocab()["<|endoftext|>"])
  4396. special_vocab.add_to_gguf(self.gguf_writer)
  4397. def set_gguf_parameters(self):
  4398. super().set_gguf_parameters()
  4399. rope_dim = self.hparams["head_dim"]
  4400. self.gguf_writer.add_rope_dimension_count(int(rope_dim * self.hparams.get("partial_rotary_factor", 0.5)))
  4401. rope_scaling = self.hparams.get("rope_scaling") or {}
  4402. if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling:
  4403. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
  4404. self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
  4405. self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"])
  4406. @ModelBase.register("GlmForCausalLM", "ChatGLMModel", "ChatGLMForConditionalGeneration")
  4407. class ChatGLMModel(TextModel):
  4408. model_arch = gguf.MODEL_ARCH.CHATGLM
  4409. def set_vocab_chatglm3(self):
  4410. dir_model = self.dir_model
  4411. hparams = self.hparams
  4412. tokens: list[bytes] = []
  4413. toktypes: list[int] = []
  4414. scores: list[float] = []
  4415. from transformers import AutoTokenizer
  4416. tokenizer = AutoTokenizer.from_pretrained(dir_model, trust_remote_code=True)
  4417. vocab_size = hparams.get("padded_vocab_size", len(tokenizer.get_vocab()))
  4418. assert max(tokenizer.get_vocab().values()) < vocab_size
  4419. role_special_tokens = ["<|system|>", "<|user|>", "<|assistant|>", "<|observation|>"]
  4420. special_tokens = ["[MASK]", "[gMASK]", "[sMASK]", "sop", "eop"] + role_special_tokens
  4421. for token_id in range(vocab_size):
  4422. piece = tokenizer._convert_id_to_token(token_id)
  4423. if token_id == 0:
  4424. piece = "<unk>"
  4425. elif token_id == 1:
  4426. piece = "<bos>"
  4427. elif token_id == 2:
  4428. piece = "<eos>"
  4429. text = piece.encode("utf-8")
  4430. score = 0.0
  4431. # Referencing the tokenizer Python implementation(https://huggingface.co/THUDM/chatglm3-6b/blob/main/tokenization_chatglm.py),
  4432. # it is only valid if it is less than tokenizer.tokenizer.sp_model.vocab_size()
  4433. if len(piece) != 0 and token_id < tokenizer.tokenizer.sp_model.vocab_size():
  4434. score = tokenizer.tokenizer.sp_model.get_score(token_id)
  4435. if token_id >= tokenizer.tokenizer.sp_model.vocab_size():
  4436. if piece in special_tokens:
  4437. toktype = SentencePieceTokenTypes.CONTROL
  4438. elif len(piece) == 0:
  4439. text = f"[PAD{token_id}]".encode("utf-8")
  4440. toktype = SentencePieceTokenTypes.UNUSED
  4441. else:
  4442. toktype = SentencePieceTokenTypes.USER_DEFINED
  4443. tokens.append(text)
  4444. scores.append(score)
  4445. toktypes.append(toktype)
  4446. continue
  4447. toktype = SentencePieceTokenTypes.NORMAL
  4448. if tokenizer.tokenizer.sp_model.is_unknown(token_id):
  4449. toktype = SentencePieceTokenTypes.UNKNOWN
  4450. elif tokenizer.tokenizer.sp_model.is_control(token_id):
  4451. toktype = SentencePieceTokenTypes.CONTROL
  4452. elif tokenizer.tokenizer.sp_model.is_unused(token_id):
  4453. toktype = SentencePieceTokenTypes.UNUSED
  4454. elif tokenizer.tokenizer.sp_model.is_byte(token_id):
  4455. toktype = SentencePieceTokenTypes.BYTE
  4456. tokens.append(text)
  4457. scores.append(score)
  4458. toktypes.append(toktype)
  4459. self.gguf_writer.add_tokenizer_model("llama")
  4460. # glm3 needs prefix and suffix formatted as:
  4461. # prompt = "[gMASK]sop<|user|>\n" + prompt + "<|assistant|>"
  4462. self.gguf_writer.add_tokenizer_pre("chatglm-spm")
  4463. self.gguf_writer.add_token_list(tokens)
  4464. self.gguf_writer.add_token_scores(scores)
  4465. self.gguf_writer.add_token_types(toktypes)
  4466. special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
  4467. special_vocab.add_to_gguf(self.gguf_writer)
  4468. @staticmethod
  4469. def token_bytes_to_string(b):
  4470. from transformers.models.gpt2.tokenization_gpt2 import bytes_to_unicode
  4471. byte_encoder = bytes_to_unicode()
  4472. return ''.join([byte_encoder[ord(char)] for char in b.decode('latin-1')])
  4473. @staticmethod
  4474. def bpe(mergeable_ranks: dict[bytes, int], token: bytes, max_rank: int | None = None) -> list[bytes]:
  4475. parts = [bytes([b]) for b in token]
  4476. while True:
  4477. min_idx = None
  4478. min_rank = None
  4479. for i, pair in enumerate(zip(parts[:-1], parts[1:])):
  4480. rank = mergeable_ranks.get(pair[0] + pair[1])
  4481. if rank is not None and (min_rank is None or rank < min_rank):
  4482. min_idx = i
  4483. min_rank = rank
  4484. if min_rank is None or (max_rank is not None and min_rank >= max_rank):
  4485. break
  4486. assert min_idx is not None
  4487. parts = parts[:min_idx] + [parts[min_idx] + parts[min_idx + 1]] + parts[min_idx + 2:]
  4488. return parts
  4489. def set_vocab(self):
  4490. if "THUDM/chatglm3-6b" in self.hparams.get("_name_or_path", ""):
  4491. self.set_vocab_chatglm3()
  4492. return
  4493. dir_model = self.dir_model
  4494. hparams = self.hparams
  4495. tokens: list[str] = []
  4496. toktypes: list[int] = []
  4497. from transformers import AutoTokenizer
  4498. tokenizer = AutoTokenizer.from_pretrained(dir_model, trust_remote_code=True)
  4499. vocab_size = hparams.get("padded_vocab_size",hparams["vocab_size"])
  4500. assert max(tokenizer.get_vocab().values()) < vocab_size
  4501. tokens, toktypes, tokpre = self.get_vocab_base()
  4502. self.gguf_writer.add_tokenizer_model("gpt2")
  4503. self.gguf_writer.add_tokenizer_pre(tokpre)
  4504. self.gguf_writer.add_token_list(tokens)
  4505. self.gguf_writer.add_token_types(toktypes)
  4506. special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=True)
  4507. # only add special tokens when they were not already loaded from config.json
  4508. special_vocab._set_special_token("eos", tokenizer.get_added_vocab()["<|endoftext|>"])
  4509. special_vocab._set_special_token("eot", tokenizer.get_added_vocab()["<|user|>"])
  4510. # this one is usually not in config.json anyway
  4511. special_vocab._set_special_token("unk", tokenizer.get_added_vocab()["<|endoftext|>"])
  4512. special_vocab.add_to_gguf(self.gguf_writer)
  4513. def set_gguf_parameters(self):
  4514. n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed"))
  4515. n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads"))
  4516. n_head_kv = self.hparams.get("multi_query_group_num", self.hparams.get("num_key_value_heads", n_head))
  4517. self.gguf_writer.add_context_length(self.hparams.get("seq_length", n_embed))
  4518. self.gguf_writer.add_embedding_length(n_embed)
  4519. self.gguf_writer.add_feed_forward_length(self.hparams.get("ffn_hidden_size", self.hparams.get("intermediate_size", 4 * n_embed)))
  4520. self.gguf_writer.add_block_count(self.hparams.get("num_layers", self.hparams["num_hidden_layers"]))
  4521. self.gguf_writer.add_head_count(n_head)
  4522. self.gguf_writer.add_head_count_kv(n_head_kv)
  4523. self.gguf_writer.add_layer_norm_rms_eps(self.hparams.get("layernorm_epsilon",1e-5))
  4524. self.gguf_writer.add_file_type(self.ftype)
  4525. if "attention_dim" in self.hparams:
  4526. rope_dim = self.hparams["attention_dim"]
  4527. else:
  4528. rope_dim = self.hparams["hidden_size"] // self.hparams["num_attention_heads"]
  4529. self.gguf_writer.add_rope_dimension_count(int(rope_dim * self.hparams.get("partial_rotary_factor", 0.5)))
  4530. self.gguf_writer.add_add_bos_token(False)
  4531. rope_freq = 10000
  4532. if "rope_ratio" in self.hparams:
  4533. rope_freq = rope_freq * self.hparams["rope_ratio"]
  4534. self.gguf_writer.add_rope_freq_base(rope_freq)
  4535. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  4536. del bid # unused
  4537. if name.endswith(".rotary_pos_emb.inv_freq") or name.startswith("model.vision."):
  4538. return []
  4539. name = name.removeprefix("transformer.")
  4540. return [(self.map_tensor_name(name), data_torch)]
  4541. @ModelBase.register("NemotronForCausalLM")
  4542. class NemotronModel(TextModel):
  4543. model_arch = gguf.MODEL_ARCH.NEMOTRON
  4544. def set_vocab(self):
  4545. self._set_vocab_sentencepiece()
  4546. self.gguf_writer.add_pad_token_id(0)
  4547. self.gguf_writer.add_unk_token_id(1)
  4548. def set_gguf_parameters(self):
  4549. super().set_gguf_parameters()
  4550. hparams = self.hparams
  4551. self.gguf_writer.add_vocab_size(hparams["vocab_size"])
  4552. f_norm_eps = self.find_hparam(["layer_norm_eps", "layer_norm_epsilon", "norm_epsilon", "norm_eps"])
  4553. self.gguf_writer.add_layer_norm_eps(f_norm_eps)
  4554. # * Partial RoPE
  4555. rot_pct = self.find_hparam(["partial_rotary_factor", "rope_pct", "rope_percent"])
  4556. n_embd = self.find_hparam(["hidden_size", "n_embd"])
  4557. n_head = self.find_hparam(["num_attention_heads", "n_head"])
  4558. self.gguf_writer.add_rope_dimension_count(int(rot_pct * n_embd) // n_head)
  4559. # * RopeScaling for Nemotron
  4560. if "rope_scaling" not in self.hparams or self.hparams["rope_scaling"] is None:
  4561. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)
  4562. else:
  4563. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
  4564. self.gguf_writer.add_rope_scaling_factor(self.hparams["factor"])
  4565. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  4566. # * Adding +1 to LayerNorm's weights here to implement layernorm1p w/o changing anything on the GGML engine side
  4567. # model.layers.{l}.input_layernorm.weight
  4568. # model.layers.{l}.post_attention_layernorm.weight
  4569. # model.norm.weight
  4570. if name.endswith("norm.weight"):
  4571. data_torch = data_torch + 1
  4572. return [(self.map_tensor_name(name), data_torch)]
  4573. @ModelBase.register("ExaoneForCausalLM")
  4574. class ExaoneModel(TextModel):
  4575. model_arch = gguf.MODEL_ARCH.EXAONE
  4576. def set_gguf_parameters(self):
  4577. hparams = self.hparams
  4578. assert (hparams["activation_function"] == "silu")
  4579. max_position_embeddings = hparams["max_position_embeddings"]
  4580. embed_dim = hparams["hidden_size"]
  4581. num_heads = hparams["num_attention_heads"]
  4582. num_kv_heads = hparams.get("num_key_value_heads", num_heads)
  4583. layer_norm_eps = hparams["layer_norm_epsilon"]
  4584. intermediate_size = hparams["intermediate_size"] if "intermediate_size" in hparams else 4 * embed_dim
  4585. num_layers = hparams["num_layers"]
  4586. # ignore for now as EXAONE-3.0-7.8B-Instruct attentino_dropout is 0.0
  4587. # attention_dropout_rate = hparams["attention_dropout"]
  4588. # ignore for now as EXAONE-3.0-7.8B-Instruct embed_dropout is 0.0
  4589. # embed_dropout_rate = hparams["embed_dropout"]
  4590. self.gguf_writer.add_embedding_length(embed_dim)
  4591. self.gguf_writer.add_head_count(num_heads)
  4592. self.gguf_writer.add_head_count_kv(num_kv_heads)
  4593. self.gguf_writer.add_context_length(max_position_embeddings)
  4594. self.gguf_writer.add_layer_norm_rms_eps(layer_norm_eps)
  4595. self.gguf_writer.add_feed_forward_length(intermediate_size)
  4596. self.gguf_writer.add_block_count(num_layers)
  4597. self.gguf_writer.add_file_type(self.ftype)
  4598. if (rope_theta := self.hparams.get("rope_theta")) is not None:
  4599. self.gguf_writer.add_rope_freq_base(rope_theta)
  4600. rotary_factor = self.find_hparam(["partial_rotary_factor", "rope_pct"], optional=True)
  4601. rotary_factor = rotary_factor if rotary_factor is not None else 1.0
  4602. self.gguf_writer.add_rope_dimension_count(int(rotary_factor * (hparams["hidden_size"] // hparams["num_attention_heads"])))
  4603. rope_scaling = self.hparams.get("rope_scaling") or {}
  4604. if rope_scaling.get("rope_type", rope_scaling.get("type")) == "linear" and "factor" in rope_scaling:
  4605. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
  4606. self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
  4607. def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
  4608. if rope_scaling := self.find_hparam(["rope_scaling"], optional=True):
  4609. if rope_scaling.get("rope_type", '').lower() == "llama3":
  4610. base = self.hparams.get("rope_theta", 10000.0)
  4611. dim = self.hparams.get("head_dim", self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
  4612. freqs = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim))
  4613. factor = rope_scaling.get("factor", 8.0)
  4614. low_freq_factor = rope_scaling.get("low_freq_factor", 1.0)
  4615. high_freq_factor = rope_scaling.get("high_freq_factor", 4.0)
  4616. old_context_len = self.hparams.get("original_max_position_embeddings", 8192)
  4617. low_freq_wavelen = old_context_len / low_freq_factor
  4618. high_freq_wavelen = old_context_len / high_freq_factor
  4619. assert low_freq_wavelen != high_freq_wavelen
  4620. rope_factors = []
  4621. for freq in freqs:
  4622. wavelen = 2 * math.pi / freq
  4623. if wavelen < high_freq_wavelen:
  4624. rope_factors.append(1)
  4625. elif wavelen > low_freq_wavelen:
  4626. rope_factors.append(factor)
  4627. else:
  4628. smooth = (old_context_len / wavelen - low_freq_factor) / (high_freq_factor - low_freq_factor)
  4629. rope_factors.append(1 / ((1 - smooth) / factor + smooth))
  4630. yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FREQS), torch.tensor(rope_factors, dtype=torch.float32))
  4631. @ModelBase.register("GraniteForCausalLM")
  4632. class GraniteModel(LlamaModel):
  4633. """Conversion for IBM's GraniteForCausalLM"""
  4634. model_arch = gguf.MODEL_ARCH.GRANITE
  4635. def set_gguf_parameters(self):
  4636. """Granite uses standard llama parameters with the following differences:
  4637. - No head_dim support
  4638. - New multiplier params:
  4639. - attention_scale
  4640. - embedding_scale
  4641. - residual_scale
  4642. - logits_scaling
  4643. """
  4644. if head_dim := self.hparams.pop("head_dim", None):
  4645. logger.warning("Ignoring head_dim (%s) from config for Granite", head_dim)
  4646. super().set_gguf_parameters()
  4647. # NOTE: Convert _multiplier params to _scale params for naming
  4648. # consistency
  4649. if attention_scale := self.hparams.get("attention_multiplier"):
  4650. self.gguf_writer.add_attention_scale(attention_scale)
  4651. logger.info("gguf: (granite) attention_scale = %s", attention_scale)
  4652. if embedding_scale := self.hparams.get("embedding_multiplier"):
  4653. self.gguf_writer.add_embedding_scale(embedding_scale)
  4654. logger.info("gguf: (granite) embedding_scale = %s", embedding_scale)
  4655. if residual_scale := self.hparams.get("residual_multiplier"):
  4656. self.gguf_writer.add_residual_scale(residual_scale)
  4657. logger.info("gguf: (granite) residual_scale = %s", residual_scale)
  4658. if logits_scale := self.hparams.get("logits_scaling"):
  4659. self.gguf_writer.add_logit_scale(logits_scale)
  4660. logger.info("gguf: (granite) logits_scale = %s", logits_scale)
  4661. @ModelBase.register("GraniteMoeForCausalLM", "GraniteMoeSharedForCausalLM")
  4662. class GraniteMoeModel(GraniteModel):
  4663. """Conversion for IBM's GraniteMoeForCausalLM"""
  4664. model_arch = gguf.MODEL_ARCH.GRANITE_MOE
  4665. def set_gguf_parameters(self):
  4666. """GraniteMoeShared uses GraniteMoe parameters plus the following:
  4667. - shared_intermediate_size
  4668. """
  4669. super().set_gguf_parameters()
  4670. if shared_feed_forward_length := self.hparams.get("shared_intermediate_size"):
  4671. self.gguf_writer.add_expert_shared_feed_forward_length(shared_feed_forward_length)
  4672. logger.info("gguf: (granitemoeshared) shared_feed_forward_length = %s", shared_feed_forward_length)
  4673. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  4674. """In modeling_granitemoe, the JetMoe implementation of parallel experts
  4675. is used. This essentially merges w1 and w3 into a single tensor with 2x
  4676. the hidden size that is then split during forward. To keep compatibility
  4677. with existing mixtral support, we pull them apart here.
  4678. """
  4679. if name.endswith("block_sparse_moe.input_linear.weight"):
  4680. ffn_dim = self.hparams["intermediate_size"]
  4681. assert data_torch.shape[-2] == 2 * ffn_dim, "Merged FFN tensor size must be 2 * intermediate_size"
  4682. gate, up = data_torch.split(ffn_dim, dim=-2)
  4683. return [
  4684. (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE_EXP, bid), gate),
  4685. (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP_EXP, bid), up),
  4686. ]
  4687. if name.endswith("shared_mlp.input_linear.weight"):
  4688. ffn_dim = self.hparams["shared_intermediate_size"]
  4689. assert data_torch.shape[-2] == 2 * ffn_dim, "Merged FFN tensor size must be 2 * shared_intermediate_size"
  4690. gate, up = data_torch.split(ffn_dim, dim=-2)
  4691. return [
  4692. (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE_SHEXP, bid), gate),
  4693. (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP_SHEXP, bid), up),
  4694. ]
  4695. return super().modify_tensors(data_torch, name, bid)
  4696. @ModelBase.register("BailingMoeForCausalLM")
  4697. class BailingMoeModel(TextModel):
  4698. model_arch = gguf.MODEL_ARCH.BAILINGMOE
  4699. def set_vocab(self):
  4700. self._set_vocab_gpt2()
  4701. def set_gguf_parameters(self):
  4702. super().set_gguf_parameters()
  4703. hparams = self.hparams
  4704. rope_dim = hparams.get("head_dim") or hparams["hidden_size"] // hparams["num_attention_heads"]
  4705. self.gguf_writer.add_rope_dimension_count(rope_dim)
  4706. rope_scaling = self.hparams.get("rope_scaling") or {}
  4707. if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling:
  4708. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
  4709. self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
  4710. self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"])
  4711. else:
  4712. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)
  4713. self.gguf_writer.add_leading_dense_block_count(hparams["first_k_dense_replace"])
  4714. self.gguf_writer.add_vocab_size(hparams["vocab_size"])
  4715. self.gguf_writer.add_expert_feed_forward_length(hparams["moe_intermediate_size"])
  4716. self.gguf_writer.add_expert_weights_scale(1.0)
  4717. self.gguf_writer.add_expert_count(hparams["num_experts"])
  4718. self.gguf_writer.add_expert_shared_count(hparams["num_shared_experts"])
  4719. self.gguf_writer.add_expert_weights_norm(hparams["norm_topk_prob"])
  4720. _experts: list[dict[str, Tensor]] | None = None
  4721. @staticmethod
  4722. def permute(weights: Tensor, n_head: int, n_head_kv: int | None):
  4723. if n_head_kv is not None and n_head != n_head_kv:
  4724. n_head = n_head_kv
  4725. return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
  4726. .swapaxes(1, 2)
  4727. .reshape(weights.shape))
  4728. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  4729. n_head = self.hparams["num_attention_heads"]
  4730. n_kv_head = self.hparams.get("num_key_value_heads")
  4731. n_embd = self.hparams["hidden_size"]
  4732. head_dim = self.hparams.get("head_dim") or n_embd // n_head
  4733. output_name = self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT)
  4734. if name.endswith("attention.dense.weight"):
  4735. return [(self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_OUT, bid), data_torch)]
  4736. elif name.endswith("query_key_value.weight"):
  4737. q, k, v = data_torch.split([n_head * head_dim, n_kv_head * head_dim, n_kv_head * head_dim], dim=-2)
  4738. return [
  4739. (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid), BailingMoeModel.permute(q, n_head, n_head)),
  4740. (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid), BailingMoeModel.permute(k, n_head, n_kv_head)),
  4741. (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid), v)
  4742. ]
  4743. elif name.find("mlp.experts") != -1:
  4744. n_experts = self.hparams["num_experts"]
  4745. assert bid is not None
  4746. tensors: list[tuple[str, Tensor]] = []
  4747. if self._experts is None:
  4748. self._experts = [{} for _ in range(self.block_count)]
  4749. self._experts[bid][name] = data_torch
  4750. if len(self._experts[bid]) >= n_experts * 3:
  4751. # merge the experts into a single 3d tensor
  4752. for w_name in ["down_proj", "gate_proj", "up_proj"]:
  4753. datas: list[Tensor] = []
  4754. for xid in range(n_experts):
  4755. ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
  4756. datas.append(self._experts[bid][ename])
  4757. del self._experts[bid][ename]
  4758. data_torch = torch.stack(datas, dim=0)
  4759. merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"
  4760. new_name = self.map_tensor_name(merged_name)
  4761. tensors.append((new_name, data_torch))
  4762. return tensors
  4763. new_name = self.map_tensor_name(name)
  4764. if new_name == output_name and self.hparams.get("norm_head"):
  4765. data_torch = data_torch.float()
  4766. data_torch /= torch.norm(data_torch, p=2, dim=0, keepdim=True) + 1e-7
  4767. return [(new_name, data_torch)]
  4768. def prepare_tensors(self):
  4769. super().prepare_tensors()
  4770. if self._experts is not None:
  4771. # flatten `list[dict[str, Tensor]]` into `list[str]`
  4772. experts = [k for d in self._experts for k in d.keys()]
  4773. if len(experts) > 0:
  4774. raise ValueError(f"Unprocessed experts: {experts}")
  4775. @ModelBase.register("ChameleonForConditionalGeneration")
  4776. @ModelBase.register("ChameleonForCausalLM") # obsolete
  4777. class ChameleonModel(TextModel):
  4778. model_arch = gguf.MODEL_ARCH.CHAMELEON
  4779. def set_gguf_parameters(self):
  4780. super().set_gguf_parameters()
  4781. self.gguf_writer.add_swin_norm(self.hparams.get("swin_norm", False))
  4782. def set_vocab(self):
  4783. self._set_vocab_gpt2()
  4784. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  4785. # ignore image tokenizer for now
  4786. # TODO: remove this once image support is implemented for Chameleon
  4787. if name.startswith("model.vqmodel"):
  4788. return []
  4789. n_head = self.hparams["num_attention_heads"]
  4790. n_kv_head = self.hparams.get("num_key_value_heads")
  4791. hidden_dim = self.hparams.get("hidden_size")
  4792. if name.endswith(("q_proj.weight", "q_proj.bias")):
  4793. data_torch = LlamaModel.permute(data_torch, n_head, n_head)
  4794. if name.endswith(("k_proj.weight", "k_proj.bias")):
  4795. data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)
  4796. if name.endswith(("q_norm.weight", "q_norm.bias")):
  4797. data_torch = ChameleonModel._reverse_hf_permute(data_torch, n_head, hidden_dim)
  4798. if name.endswith(("k_norm.weight", "k_norm.bias")):
  4799. data_torch = ChameleonModel._reverse_hf_permute(data_torch, n_kv_head, hidden_dim)
  4800. return [(self.map_tensor_name(name), data_torch)]
  4801. # see: https://github.com/huggingface/transformers/blob/72fb02c47dbbe1999ae105319f24631cad6e2e00/src/transformers/models/chameleon/convert_chameleon_weights_to_hf.py#L176-L203
  4802. @staticmethod
  4803. def _reverse_hf_permute(data_torch, n_heads, hidden_dim):
  4804. head_dim = hidden_dim // n_heads
  4805. data_torch = data_torch[0].view(2, head_dim // 2).t().reshape(1, -1)
  4806. data_torch = data_torch.repeat_interleave(n_heads, 0)
  4807. return data_torch
  4808. ###### CONVERSION LOGIC ######
  4809. # tree of lazy tensors
  4810. class LazyTorchTensor(gguf.LazyBase):
  4811. _tensor_type = torch.Tensor
  4812. # to keep the type-checker happy
  4813. dtype: torch.dtype
  4814. shape: torch.Size
  4815. # only used when converting a torch.Tensor to a np.ndarray
  4816. _dtype_map: dict[torch.dtype, type] = {
  4817. torch.float16: np.float16,
  4818. torch.float32: np.float32,
  4819. }
  4820. # used for safetensors slices
  4821. # ref: https://github.com/huggingface/safetensors/blob/079781fd0dc455ba0fe851e2b4507c33d0c0d407/bindings/python/src/lib.rs#L1046
  4822. # TODO: uncomment U64, U32, and U16, ref: https://github.com/pytorch/pytorch/issues/58734
  4823. _dtype_str_map: dict[str, torch.dtype] = {
  4824. "F64": torch.float64,
  4825. "F32": torch.float32,
  4826. "BF16": torch.bfloat16,
  4827. "F16": torch.float16,
  4828. # "U64": torch.uint64,
  4829. "I64": torch.int64,
  4830. # "U32": torch.uint32,
  4831. "I32": torch.int32,
  4832. # "U16": torch.uint16,
  4833. "I16": torch.int16,
  4834. "U8": torch.uint8,
  4835. "I8": torch.int8,
  4836. "BOOL": torch.bool,
  4837. "F8_E4M3": torch.float8_e4m3fn,
  4838. "F8_E5M2": torch.float8_e5m2,
  4839. }
  4840. def numpy(self) -> gguf.LazyNumpyTensor:
  4841. dtype = self._dtype_map[self.dtype]
  4842. return gguf.LazyNumpyTensor(
  4843. meta=gguf.LazyNumpyTensor.meta_with_dtype_and_shape(dtype, self.shape),
  4844. args=(self,),
  4845. func=(lambda s: s.numpy())
  4846. )
  4847. @classmethod
  4848. def meta_with_dtype_and_shape(cls, dtype: torch.dtype, shape: tuple[int, ...]) -> Tensor:
  4849. return torch.empty(size=shape, dtype=dtype, device="meta")
  4850. @classmethod
  4851. def from_safetensors_slice(cls, st_slice: Any) -> Tensor:
  4852. dtype = cls._dtype_str_map[st_slice.get_dtype()]
  4853. shape: tuple[int, ...] = tuple(st_slice.get_shape())
  4854. lazy = cls(meta=cls.meta_with_dtype_and_shape(dtype, shape), args=(st_slice,), func=lambda s: s[:])
  4855. return cast(torch.Tensor, lazy)
  4856. @classmethod
  4857. def from_remote_tensor(cls, remote_tensor: gguf.utility.RemoteTensor):
  4858. dtype = cls._dtype_str_map[remote_tensor.dtype]
  4859. shape = remote_tensor.shape
  4860. meta = cls.meta_with_dtype_and_shape(dtype, shape)
  4861. lazy = cls(meta=meta, args=(remote_tensor,), func=lambda r: torch.frombuffer(r.data(), dtype=dtype).reshape(shape))
  4862. return cast(torch.Tensor, lazy)
  4863. @classmethod
  4864. def __torch_function__(cls, func, types, args=(), kwargs=None):
  4865. del types # unused
  4866. if kwargs is None:
  4867. kwargs = {}
  4868. if func is torch.Tensor.numpy:
  4869. return args[0].numpy()
  4870. return cls._wrap_fn(func)(*args, **kwargs)
  4871. def parse_args() -> argparse.Namespace:
  4872. parser = argparse.ArgumentParser(
  4873. description="Convert a huggingface model to a GGML compatible file")
  4874. parser.add_argument(
  4875. "--vocab-only", action="store_true",
  4876. help="extract only the vocab",
  4877. )
  4878. parser.add_argument(
  4879. "--outfile", type=Path,
  4880. help="path to write to; default: based on input. {ftype} will be replaced by the outtype.",
  4881. )
  4882. parser.add_argument(
  4883. "--outtype", type=str, choices=["f32", "f16", "bf16", "q8_0", "tq1_0", "tq2_0", "auto"], default="f16",
  4884. help="output format - use f32 for float32, f16 for float16, bf16 for bfloat16, q8_0 for Q8_0, tq1_0 or tq2_0 for ternary, and auto for the highest-fidelity 16-bit float type depending on the first loaded tensor type",
  4885. )
  4886. parser.add_argument(
  4887. "--bigendian", action="store_true",
  4888. help="model is executed on big endian machine",
  4889. )
  4890. parser.add_argument(
  4891. "model", type=Path,
  4892. help="directory containing model file",
  4893. nargs="?",
  4894. )
  4895. parser.add_argument(
  4896. "--use-temp-file", action="store_true",
  4897. help="use the tempfile library while processing (helpful when running out of memory, process killed)",
  4898. )
  4899. parser.add_argument(
  4900. "--no-lazy", action="store_true",
  4901. help="use more RAM by computing all outputs before writing (use in case lazy evaluation is broken)",
  4902. )
  4903. parser.add_argument(
  4904. "--model-name", type=str, default=None,
  4905. help="name of the model",
  4906. )
  4907. parser.add_argument(
  4908. "--verbose", action="store_true",
  4909. help="increase output verbosity",
  4910. )
  4911. parser.add_argument(
  4912. "--split-max-tensors", type=int, default=0,
  4913. help="max tensors in each split",
  4914. )
  4915. parser.add_argument(
  4916. "--split-max-size", type=str, default="0",
  4917. help="max size per split N(M|G)",
  4918. )
  4919. parser.add_argument(
  4920. "--dry-run", action="store_true",
  4921. help="only print out a split plan and exit, without writing any new files",
  4922. )
  4923. parser.add_argument(
  4924. "--no-tensor-first-split", action="store_true",
  4925. help="do not add tensors to the first split (disabled by default)"
  4926. )
  4927. parser.add_argument(
  4928. "--metadata", type=Path,
  4929. help="Specify the path for an authorship metadata override file"
  4930. )
  4931. parser.add_argument(
  4932. "--print-supported-models", action="store_true",
  4933. help="Print the supported models"
  4934. )
  4935. parser.add_argument(
  4936. "--remote", action="store_true",
  4937. help="(Experimental) Read safetensors file remotely without downloading to disk. Config and tokenizer files will still be downloaded. To use this feature, you need to specify Hugging Face model repo name instead of a local directory. For example: 'HuggingFaceTB/SmolLM2-1.7B-Instruct'. Note: To access gated repo, set HF_TOKEN environment variable to your Hugging Face token.",
  4938. )
  4939. parser.add_argument(
  4940. "--mmproj", action="store_true",
  4941. help="(Experimental) Export multimodal projector (mmproj) for vision models. This will only work on some vision models. A prefix 'mmproj-' will be added to the output file name.",
  4942. )
  4943. args = parser.parse_args()
  4944. if not args.print_supported_models and args.model is None:
  4945. parser.error("the following arguments are required: model")
  4946. return args
  4947. def split_str_to_n_bytes(split_str: str) -> int:
  4948. if split_str.endswith("K"):
  4949. n = int(split_str[:-1]) * 1000
  4950. elif split_str.endswith("M"):
  4951. n = int(split_str[:-1]) * 1000 * 1000
  4952. elif split_str.endswith("G"):
  4953. n = int(split_str[:-1]) * 1000 * 1000 * 1000
  4954. elif split_str.isnumeric():
  4955. n = int(split_str)
  4956. else:
  4957. raise ValueError(f"Invalid split size: {split_str}, must be a number, optionally followed by K, M, or G")
  4958. if n < 0:
  4959. raise ValueError(f"Invalid split size: {split_str}, must be positive")
  4960. return n
  4961. def get_model_architecture(hparams: dict[str, Any], model_type: ModelType) -> str:
  4962. text_config = hparams.get("text_config", {})
  4963. vision_config = hparams.get("vision_config", {})
  4964. arch = hparams["architectures"][0]
  4965. # if "architectures" is found in the sub-config, use that instead
  4966. if model_type == ModelType.TEXT and text_config.get("architectures") is not None:
  4967. arch = text_config["architectures"][0]
  4968. elif model_type == ModelType.VISION and vision_config.get("architectures") is not None:
  4969. arch = vision_config["architectures"][0]
  4970. return arch
  4971. def main() -> None:
  4972. args = parse_args()
  4973. if args.print_supported_models:
  4974. logger.error("Supported models:")
  4975. ModelBase.print_registered_models()
  4976. sys.exit(0)
  4977. if args.verbose:
  4978. logging.basicConfig(level=logging.DEBUG)
  4979. else:
  4980. logging.basicConfig(level=logging.INFO)
  4981. dir_model = args.model
  4982. if args.remote:
  4983. from huggingface_hub import snapshot_download
  4984. local_dir = snapshot_download(
  4985. repo_id=str(dir_model),
  4986. allow_patterns=["LICENSE", "*.json", "*.md", "*.txt", "tokenizer.model"])
  4987. dir_model = Path(local_dir)
  4988. logger.info(f"Downloaded config and tokenizer to {local_dir}")
  4989. if not dir_model.is_dir():
  4990. logger.error(f'Error: {args.model} is not a directory')
  4991. sys.exit(1)
  4992. ftype_map: dict[str, gguf.LlamaFileType] = {
  4993. "f32": gguf.LlamaFileType.ALL_F32,
  4994. "f16": gguf.LlamaFileType.MOSTLY_F16,
  4995. "bf16": gguf.LlamaFileType.MOSTLY_BF16,
  4996. "q8_0": gguf.LlamaFileType.MOSTLY_Q8_0,
  4997. "tq1_0": gguf.LlamaFileType.MOSTLY_TQ1_0,
  4998. "tq2_0": gguf.LlamaFileType.MOSTLY_TQ2_0,
  4999. "auto": gguf.LlamaFileType.GUESSED,
  5000. }
  5001. is_split = args.split_max_tensors > 0 or args.split_max_size != "0"
  5002. if args.use_temp_file and is_split:
  5003. logger.error("Error: Cannot use temp file when splitting")
  5004. sys.exit(1)
  5005. if args.outfile is not None:
  5006. fname_out = args.outfile
  5007. elif args.remote:
  5008. # if remote, use the model ID as the output file name
  5009. fname_out = Path("./" + str(args.model).replace("/", "-") + "-{ftype}.gguf")
  5010. else:
  5011. fname_out = dir_model
  5012. logger.info(f"Loading model: {dir_model.name}")
  5013. if args.mmproj:
  5014. if "mmproj" not in fname_out.name:
  5015. fname_out = ModelBase.add_prefix_to_filename(fname_out, "mmproj-")
  5016. with torch.inference_mode():
  5017. output_type = ftype_map[args.outtype]
  5018. model_type = ModelType.VISION if args.mmproj else ModelType.TEXT
  5019. hparams = ModelBase.load_hparams(dir_model)
  5020. model_architecture = get_model_architecture(hparams, model_type)
  5021. logger.info(f"Model architecture: {model_architecture}")
  5022. try:
  5023. model_class = ModelBase.from_model_architecture(model_architecture, model_type=model_type)
  5024. except NotImplementedError:
  5025. logger.error(f"Model {model_architecture} is not supported")
  5026. sys.exit(1)
  5027. model_instance = model_class(dir_model, output_type, fname_out,
  5028. is_big_endian=args.bigendian, use_temp_file=args.use_temp_file,
  5029. eager=args.no_lazy,
  5030. metadata_override=args.metadata, model_name=args.model_name,
  5031. split_max_tensors=args.split_max_tensors,
  5032. split_max_size=split_str_to_n_bytes(args.split_max_size), dry_run=args.dry_run,
  5033. small_first_shard=args.no_tensor_first_split,
  5034. remote_hf_model_id=str(args.model) if args.remote else None)
  5035. if args.vocab_only:
  5036. logger.info("Exporting model vocab...")
  5037. model_instance.write_vocab()
  5038. logger.info(f"Model vocab successfully exported to {model_instance.fname_out}")
  5039. else:
  5040. logger.info("Exporting model...")
  5041. model_instance.write()
  5042. out_path = f"{model_instance.fname_out.parent}{os.sep}" if is_split else model_instance.fname_out
  5043. logger.info(f"Model successfully exported to {out_path}")
  5044. if __name__ == '__main__':
  5045. main()