#!/usr/bin/env python3
# -*- coding: utf-8 -*-

from __future__ import annotations

import ast
import logging
import argparse
import contextlib
import json
import os
import re
import sys
from enum import IntEnum
from pathlib import Path
from hashlib import sha256
from typing import TYPE_CHECKING, Any, Callable, ContextManager, Iterable, Iterator, Literal, Sequence, TypeVar, cast
from itertools import chain
from transformers import AutoConfig

import math
import numpy as np
import torch

if TYPE_CHECKING:
    from torch import Tensor

if 'NO_LOCAL_GGUF' not in os.environ:
    sys.path.insert(1, str(Path(__file__).parent / 'gguf-py'))
import gguf

logger = logging.getLogger("hf-to-gguf")


###### MODEL DEFINITIONS ######

class SentencePieceTokenTypes(IntEnum):
    NORMAL = 1
    UNKNOWN = 2
    CONTROL = 3
    USER_DEFINED = 4
    UNUSED = 5
    BYTE = 6


class ModelType(IntEnum):
    TEXT = 1
    VISION = 2


AnyModel = TypeVar("AnyModel", bound="type[ModelBase]")


class ModelBase:
    _model_classes: dict[ModelType, dict[str, type[ModelBase]]] = {
        ModelType.TEXT: {},
        ModelType.VISION: {},
    }

    dir_model: Path
    ftype: gguf.LlamaFileType
    fname_out: Path
    is_big_endian: bool
    endianess: gguf.GGUFEndian
    use_temp_file: bool
    lazy: bool
    part_names: list[str]
    is_safetensors: bool
    hparams: dict[str, Any]
    tensor_names: set[str] | None
    gguf_writer: gguf.GGUFWriter
    model_name: str | None
    metadata_override: Path | None
    dir_model_card: Path
    remote_hf_model_id: str | None

    # subclasses should define this!
    model_arch: gguf.MODEL_ARCH

    # subclasses should initialize this!
    block_count: int
    tensor_map: gguf.TensorNameMap

    def __init__(self, dir_model: Path, ftype: gguf.LlamaFileType, fname_out: Path, *, is_big_endian: bool = False,
                 use_temp_file: bool = False, eager: bool = False,
                 metadata_override: Path | None = None, model_name: str | None = None,
                 split_max_tensors: int = 0, split_max_size: int = 0, dry_run: bool = False,
                 small_first_shard: bool = False, hparams: dict[str, Any] | None = None, remote_hf_model_id: str | None = None):
        if type(self) is ModelBase or \
                type(self) is TextModel or \
                type(self) is VisionModel:
            raise TypeError(f"{type(self).__name__!r} should not be directly instantiated")

        self.dir_model = dir_model
        self.ftype = ftype
        self.fname_out = fname_out
        self.is_big_endian = is_big_endian
        self.endianess = gguf.GGUFEndian.BIG if is_big_endian else gguf.GGUFEndian.LITTLE
        self.use_temp_file = use_temp_file
        self.lazy = not eager or (remote_hf_model_id is not None)
        self.remote_hf_model_id = remote_hf_model_id
        if remote_hf_model_id is not None:
            self.is_safetensors = True

            def get_remote_tensors() -> Iterator[tuple[str, Tensor]]:
                logger.info(f"Using remote model with HuggingFace id: {remote_hf_model_id}")
                remote_tensors = gguf.utility.SafetensorRemote.get_list_tensors_hf_model(remote_hf_model_id)
                self.tensor_names = set(name for name in remote_tensors.keys())
                for name, remote_tensor in gguf.utility.SafetensorRemote.get_list_tensors_hf_model(remote_hf_model_id).items():
                    yield (name, LazyTorchTensor.from_remote_tensor(remote_tensor))

            self.get_tensors = get_remote_tensors
        else:
            self.part_names = ModelBase.get_model_part_names(self.dir_model, "model", ".safetensors")
            self.is_safetensors = len(self.part_names) > 0
            if not self.is_safetensors:
                self.part_names = ModelBase.get_model_part_names(self.dir_model, "pytorch_model", ".bin")

        self.hparams = ModelBase.load_hparams(self.dir_model) if hparams is None else hparams
        self.tensor_names = None
        self.metadata_override = metadata_override
        self.model_name = model_name
        self.dir_model_card = dir_model  # overridden in convert_lora_to_gguf.py

        # Apply heuristics to figure out typical tensor encoding based on first layer tensor encoding type
        if self.ftype == gguf.LlamaFileType.GUESSED:
            # NOTE: can't use field "torch_dtype" in config.json, because some finetunes lie.
            _, first_tensor = next(self.get_tensors())
            if first_tensor.dtype == torch.float16:
                logger.info(f"choosing --outtype f16 from first tensor type ({first_tensor.dtype})")
                self.ftype = gguf.LlamaFileType.MOSTLY_F16
            else:
                logger.info(f"choosing --outtype bf16 from first tensor type ({first_tensor.dtype})")
                self.ftype = gguf.LlamaFileType.MOSTLY_BF16

        # Configure GGUF Writer
        self.gguf_writer = gguf.GGUFWriter(path=None, arch=gguf.MODEL_ARCH_NAMES[self.model_arch], endianess=self.endianess, use_temp_file=self.use_temp_file,
                                           split_max_tensors=split_max_tensors, split_max_size=split_max_size, dry_run=dry_run, small_first_shard=small_first_shard)

    @classmethod
    def add_prefix_to_filename(cls, path: Path, prefix: str) -> Path:
        stem, suffix = path.stem, path.suffix
        new_name = f"{prefix}{stem}{suffix}"
        return path.with_name(new_name)
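
    # Illustrative sketch (hypothetical paths): add_prefix_to_filename(Path("out/model.gguf"), "mmproj-")
    # returns Path("out/mmproj-model.gguf") -- only the file name changes, the parent directory is kept.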

    def find_hparam(self, keys: Iterable[str], optional: bool = False) -> Any:
        key = next((k for k in keys if k in self.hparams), None)
        if key is not None:
            return self.hparams[key]
        if optional:
            return None
        raise KeyError(f"could not find any of: {keys}")
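
    # e.g. find_hparam(["num_hidden_layers", "n_layer"]) returns the first of those keys
    # present in hparams; with optional=True a complete miss yields None instead of KeyError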

    def get_tensors(self) -> Iterator[tuple[str, Tensor]]:
        tensor_names_from_parts: set[str] = set()

        index_name = "model.safetensors" if self.is_safetensors else "pytorch_model.bin"
        index_name += ".index.json"
        index_file = self.dir_model / index_name

        if index_file.is_file():
            self.tensor_names = set()
            logger.info(f"gguf: loading model weight map from '{index_name}'")
            with open(index_file, "r", encoding="utf-8") as f:
                index: dict[str, Any] = json.load(f)
                weight_map = index.get("weight_map")
                if weight_map is None or not isinstance(weight_map, dict):
                    raise ValueError(f"Can't load 'weight_map' from {index_name!r}")
                self.tensor_names.update(weight_map.keys())
        else:
            self.tensor_names = tensor_names_from_parts
            weight_map = {}

        for part_name in self.part_names:
            logger.info(f"gguf: loading model part '{part_name}'")
            ctx: ContextManager[Any]
            if self.is_safetensors:
                from safetensors import safe_open
                ctx = cast(ContextManager[Any], safe_open(self.dir_model / part_name, framework="pt", device="cpu"))
            else:
                ctx = contextlib.nullcontext(torch.load(str(self.dir_model / part_name), map_location="cpu", mmap=True, weights_only=True))

            with ctx as model_part:
                tensor_names_from_parts.update(model_part.keys())

                for name in model_part.keys():
                    if self.is_safetensors:
                        if self.lazy:
                            data = model_part.get_slice(name)
                            data = LazyTorchTensor.from_safetensors_slice(data)
                        else:
                            data = model_part.get_tensor(name)
                    else:
                        data = model_part[name]
                        if self.lazy:
                            data = LazyTorchTensor.from_eager(data)

                    yield name, data

        # verify tensor name presence and identify potentially missing files
        if len(tensor_names_from_parts.symmetric_difference(self.tensor_names)) > 0:
            missing = sorted(self.tensor_names.difference(tensor_names_from_parts))
            extra = sorted(tensor_names_from_parts.difference(self.tensor_names))
            missing_files = sorted(set(weight_map[n] for n in missing if n in weight_map))
            if len(extra) == 0 and len(missing_files) > 0:
                raise ValueError(f"Missing or incomplete model files: {missing_files}\n"
                                 f"Missing tensors: {missing}")
            else:
                raise ValueError("Mismatch between weight map and model parts for tensor names:\n"
                                 f"Missing tensors: {missing}\n"
                                 f"Extra tensors: {extra}")

    def format_tensor_name(self, key: gguf.MODEL_TENSOR, bid: int | None = None, suffix: str = ".weight") -> str:
        if key not in gguf.MODEL_TENSORS[self.model_arch]:
            raise ValueError(f"Missing {key!r} for MODEL_TENSORS of {self.model_arch!r}")
        name: str = gguf.TENSOR_NAMES[key]
        if "{bid}" in name:
            assert bid is not None
            name = name.format(bid=bid)
        return name + suffix
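
    # e.g. for architectures whose TENSOR_NAMES template is "blk.{bid}.attn_q",
    # format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid=3) yields "blk.3.attn_q.weight"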

    def match_model_tensor_name(self, name: str, key: gguf.MODEL_TENSOR, bid: int | None, suffix: str = ".weight") -> bool:
        if key not in gguf.MODEL_TENSORS[self.model_arch]:
            return False
        key_name: str = gguf.TENSOR_NAMES[key]
        if "{bid}" in key_name:
            if bid is None:
                return False
            key_name = key_name.format(bid=bid)
        else:
            if bid is not None:
                return False
        return name == (key_name + suffix)

    def map_tensor_name(self, name: str, try_suffixes: Sequence[str] = (".weight", ".bias")) -> str:
        new_name = self.tensor_map.get_name(key=name, try_suffixes=try_suffixes)
        if new_name is None:
            raise ValueError(f"Can not map tensor {name!r}")
        return new_name

    def set_gguf_parameters(self):
        raise NotImplementedError("set_gguf_parameters() must be implemented in subclasses")

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        return [(self.map_tensor_name(name), data_torch)]

    def tensor_force_quant(self, name: str, new_name: str, bid: int | None, n_dims: int) -> gguf.GGMLQuantizationType | bool:
        del name, new_name, bid, n_dims  # unused

        return False

    # some models need extra generated tensors (like rope_freqs)
    def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
        return ()
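
    # Illustrative override sketch (hypothetical subclass, not part of the base class):
    # a model needing a precomputed rope_freqs tensor could yield it here, e.g.
    #
    #     def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
    #         rope_dim = self.hparams["head_dim"]
    #         freqs = 1.0 / (self.hparams["rope_theta"] ** (torch.arange(0, rope_dim, 2).float() / rope_dim))
    #         yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FREQS), freqs)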

    def prepare_tensors(self):
        max_name_len = max(len(s) for _, s in self.tensor_map.mapping.values()) + len(".weight,")

        for name, data_torch in chain(self.generate_extra_tensors(), self.get_tensors()):
            # we don't need these
            if name.endswith((".attention.masked_bias", ".attention.bias", ".rotary_emb.inv_freq")):
                continue

            old_dtype = data_torch.dtype

            # convert any unsupported data types to float32
            if data_torch.dtype not in (torch.float16, torch.float32):
                data_torch = data_torch.to(torch.float32)

            # use the first number-like part of the tensor name as the block id
            bid = None
            for part in name.split("."):
                if part.isdecimal():
                    bid = int(part)
                    break
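            # e.g. "model.layers.12.self_attn.q_proj.weight" -> bid = 12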

            for new_name, data_torch in (self.modify_tensors(data_torch, name, bid)):
                # TODO: why do we squeeze here?
                # data = data_torch.squeeze().numpy()
                data = data_torch.numpy()

                # if data ends up empty, it means data_torch was a scalar tensor -> restore
                if len(data.shape) == 0:
                    data = data_torch.numpy()

                n_dims = len(data.shape)
                data_qtype: gguf.GGMLQuantizationType | bool = self.tensor_force_quant(name, new_name, bid, n_dims)

                # Most of the codebase that takes in 1D tensors or norms only handles F32 tensors
                if n_dims <= 1 or new_name.endswith("_norm.weight"):
                    data_qtype = gguf.GGMLQuantizationType.F32

                # Conditions should closely match those in llama_model_quantize_internal in llama.cpp
                # Some tensor types are always in float32
                if data_qtype is False and (
                    any(
                        self.match_model_tensor_name(new_name, key, bid)
                        for key in (
                            gguf.MODEL_TENSOR.FFN_GATE_INP,
                            gguf.MODEL_TENSOR.POS_EMBD,
                            gguf.MODEL_TENSOR.TOKEN_TYPES,
                            gguf.MODEL_TENSOR.SSM_CONV1D,
                            gguf.MODEL_TENSOR.TIME_MIX_FIRST,
                            gguf.MODEL_TENSOR.TIME_MIX_W1,
                            gguf.MODEL_TENSOR.TIME_MIX_W2,
                            gguf.MODEL_TENSOR.TIME_MIX_DECAY_W1,
                            gguf.MODEL_TENSOR.TIME_MIX_DECAY_W2,
                            gguf.MODEL_TENSOR.TIME_MIX_LERP_FUSED,
                            gguf.MODEL_TENSOR.POSNET_NORM1,
                            gguf.MODEL_TENSOR.POSNET_NORM2,
                        )
                    )
                    or not new_name.endswith(".weight")
                ):
                    data_qtype = gguf.GGMLQuantizationType.F32

                if data_qtype is False and any(
                    self.match_model_tensor_name(new_name, key, bid)
                    for key in (
                        gguf.MODEL_TENSOR.TOKEN_EMBD,
                        gguf.MODEL_TENSOR.OUTPUT,
                    )
                ):
                    if self.ftype in (
                        gguf.LlamaFileType.MOSTLY_TQ1_0,
                        gguf.LlamaFileType.MOSTLY_TQ2_0,
                    ):
                        # TODO: use Q4_K and Q6_K
                        data_qtype = gguf.GGMLQuantizationType.F16

                # No override (data_qtype is False), or wants to be quantized (data_qtype is True)
                if isinstance(data_qtype, bool):
                    if self.ftype == gguf.LlamaFileType.ALL_F32:
                        data_qtype = gguf.GGMLQuantizationType.F32
                    elif self.ftype == gguf.LlamaFileType.MOSTLY_F16:
                        data_qtype = gguf.GGMLQuantizationType.F16
                    elif self.ftype == gguf.LlamaFileType.MOSTLY_BF16:
                        data_qtype = gguf.GGMLQuantizationType.BF16
                    elif self.ftype == gguf.LlamaFileType.MOSTLY_Q8_0:
                        data_qtype = gguf.GGMLQuantizationType.Q8_0
                    elif self.ftype == gguf.LlamaFileType.MOSTLY_TQ1_0:
                        data_qtype = gguf.GGMLQuantizationType.TQ1_0
                    elif self.ftype == gguf.LlamaFileType.MOSTLY_TQ2_0:
                        data_qtype = gguf.GGMLQuantizationType.TQ2_0
                    else:
                        raise ValueError(f"Unknown file type: {self.ftype.name}")

                try:
                    data = gguf.quants.quantize(data, data_qtype)
                except gguf.QuantError as e:
                    logger.warning("%s, %s", e, "falling back to F16")
                    data_qtype = gguf.GGMLQuantizationType.F16
                    data = gguf.quants.quantize(data, data_qtype)

                shape = gguf.quant_shape_from_byte_shape(data.shape, data_qtype) if data.dtype == np.uint8 else data.shape

                # reverse shape to make it similar to the internal ggml dimension order
                shape_str = f"{{{', '.join(str(n) for n in reversed(shape))}}}"
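                # e.g. a (32000, 4096) token embedding tensor is logged as "{4096, 32000}"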

                # n_dims is implicit in the shape
                logger.info(f"{f'%-{max_name_len}s' % f'{new_name},'} {old_dtype} --> {data_qtype.name}, shape = {shape_str}")

                self.gguf_writer.add_tensor(new_name, data, raw_dtype=data_qtype)

    def set_type(self):
        self.gguf_writer.add_type(gguf.GGUFType.MODEL)

    def prepare_metadata(self, vocab_only: bool):
        total_params, shared_params, expert_params, expert_count = self.gguf_writer.get_total_parameter_count()

        self.metadata = gguf.Metadata.load(self.metadata_override, self.dir_model_card, self.model_name, total_params)

        # If we are using HF model id, set the metadata name to the model id
        if self.remote_hf_model_id:
            self.metadata.name = self.remote_hf_model_id

        # Fallback to model directory name if metadata name is still missing
        if self.metadata.name is None:
            self.metadata.name = self.dir_model.name

        # Generate parameter weight class (useful for leader boards) if not yet determined
        if self.metadata.size_label is None and total_params > 0:
            self.metadata.size_label = gguf.size_label(total_params, shared_params, expert_params, expert_count)

        self.set_type()

        logger.info("Set meta model")
        self.metadata.set_gguf_meta_model(self.gguf_writer)

        logger.info("Set model parameters")
        self.set_gguf_parameters()

        logger.info("Set model quantization version")
        self.gguf_writer.add_quantization_version(gguf.GGML_QUANT_VERSION)

    def write_vocab(self):
        raise NotImplementedError("write_vocab() must be implemented in subclasses")

    def write(self):
        self.prepare_tensors()
        self.prepare_metadata(vocab_only=False)
        self.gguf_writer.write_header_to_file(path=self.fname_out)
        self.gguf_writer.write_kv_data_to_file()
        self.gguf_writer.write_tensors_to_file(progress=True)
        self.gguf_writer.close()

    @staticmethod
    def get_model_part_names(dir_model: Path, prefix: str, suffix: str) -> list[str]:
        part_names: list[str] = []
        for filename in os.listdir(dir_model):
            if filename.startswith(prefix) and filename.endswith(suffix):
                part_names.append(filename)

        part_names.sort()

        return part_names

    @staticmethod
    def load_hparams(dir_model: Path):
        try:
            # for security reasons, we don't allow loading remote code by default
            # if a model needs remote code, we fall back to config.json instead
            return AutoConfig.from_pretrained(dir_model, trust_remote_code=False).to_dict()
        except Exception as e:
            logger.warning(f"Failed to load model config from {dir_model}: {e}")
            logger.warning("Trying to load config.json instead")
            with open(dir_model / "config.json", "r", encoding="utf-8") as f:
                return json.load(f)

    @classmethod
    def register(cls, *names: str) -> Callable[[AnyModel], AnyModel]:
        assert names

        def func(modelcls: AnyModel) -> AnyModel:
            model_type = ModelType.VISION if modelcls.model_arch == gguf.MODEL_ARCH.CLIP_VISION else ModelType.TEXT
            for name in names:
                cls._model_classes[model_type][name] = modelcls
            return modelcls
        return func

    @classmethod
    def print_registered_models(cls):
        for model_type, model_classes in cls._model_classes.items():
            logger.error(f"{model_type.name} models:")
            for name in sorted(model_classes.keys()):
                logger.error(f" - {name}")

    @classmethod
    def from_model_architecture(cls, arch: str, model_type=ModelType.TEXT) -> type[ModelBase]:
        try:
            return cls._model_classes[model_type][arch]
        except KeyError:
            raise NotImplementedError(f'Architecture {arch!r} not supported!') from None


class TextModel(ModelBase):
    model_type = ModelType.TEXT
    hf_arch: str

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.hf_arch = get_model_architecture(self.hparams, self.model_type)

        if "text_config" in self.hparams:
            # move the text_config to the root level
            self.hparams = {**self.hparams, **self.hparams["text_config"]}

        self.block_count = self.find_hparam(["n_layers", "num_hidden_layers", "n_layer", "num_layers"])
        self.tensor_map = gguf.get_tensor_name_map(self.model_arch, self.block_count)

    @classmethod
    def __init_subclass__(cls):
        # can't use an abstract property, because overriding it without type errors
        # would require using decorated functions instead of simply defining the property
        if "model_arch" not in cls.__dict__:
            raise TypeError(f"Missing property 'model_arch' for {cls.__name__!r}")

    def set_vocab(self):
        self._set_vocab_gpt2()

    def prepare_metadata(self, vocab_only: bool):
        super().prepare_metadata(vocab_only=vocab_only)

        total_params = self.gguf_writer.get_total_parameter_count()[0]
        # Extract the encoding scheme from the file type name. e.g. 'gguf.LlamaFileType.MOSTLY_Q8_0' --> 'Q8_0'
        output_type: str = self.ftype.name.partition("_")[2]

        # Filename Output
        if self.fname_out.is_dir():
            # Generate default filename based on model specification and available metadata
            if not vocab_only:
                fname_default: str = gguf.naming_convention(self.metadata.name, self.metadata.basename, self.metadata.finetune, self.metadata.version, self.metadata.size_label, output_type, model_type="LoRA" if total_params < 0 else None)
            else:
                fname_default: str = gguf.naming_convention(self.metadata.name, self.metadata.basename, self.metadata.finetune, self.metadata.version, size_label=None, output_type=None, model_type="vocab")

            # Use the default filename
            self.fname_out = self.fname_out / f"{fname_default}.gguf"
        else:
            # Output path is a custom defined templated filename
            # Note: `not is_dir()` is used because `.is_file()` will not detect
            #       file template strings as it doesn't actually exist as a file
            # Process templated file name with the output ftype, useful with the "auto" ftype
            self.fname_out = self.fname_out.parent / gguf.fill_templated_filename(self.fname_out.name, output_type)
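            # e.g. an --outfile template of "Model-{ftype}.gguf" with Q8_0 output is expected
            # to become "Model-q8_0.gguf" (assuming fill_templated_filename lowercases {ftype})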

        logger.info("Set model tokenizer")
        self.set_vocab()

    def set_gguf_parameters(self):
        self.gguf_writer.add_block_count(self.block_count)

        if (n_ctx := self.find_hparam(["max_position_embeddings", "n_ctx", "n_positions"], optional=True)) is not None:
            self.gguf_writer.add_context_length(n_ctx)
            logger.info(f"gguf: context length = {n_ctx}")

        if (n_embd := self.find_hparam(["hidden_size", "n_embd"], optional=True)) is not None:
            self.gguf_writer.add_embedding_length(n_embd)
            logger.info(f"gguf: embedding length = {n_embd}")

        if (n_ff := self.find_hparam(["intermediate_size", "n_inner"], optional=True)) is not None:
            self.gguf_writer.add_feed_forward_length(n_ff)
            logger.info(f"gguf: feed forward length = {n_ff}")

        if (n_head := self.find_hparam(["num_attention_heads", "n_head"], optional=True)) is not None:
            self.gguf_writer.add_head_count(n_head)
            logger.info(f"gguf: head count = {n_head}")

        if (n_head_kv := self.hparams.get("num_key_value_heads")) is not None:
            self.gguf_writer.add_head_count_kv(n_head_kv)
            logger.info(f"gguf: key-value head count = {n_head_kv}")

        if (rope_theta := self.hparams.get("rope_theta")) is not None:
            self.gguf_writer.add_rope_freq_base(rope_theta)
            logger.info(f"gguf: rope theta = {rope_theta}")

        if (f_rms_eps := self.hparams.get("rms_norm_eps")) is not None:
            self.gguf_writer.add_layer_norm_rms_eps(f_rms_eps)
            logger.info(f"gguf: rms norm epsilon = {f_rms_eps}")

        if (f_norm_eps := self.find_hparam(["layer_norm_eps", "layer_norm_epsilon", "norm_epsilon"], optional=True)) is not None:
            self.gguf_writer.add_layer_norm_eps(f_norm_eps)
            logger.info(f"gguf: layer norm epsilon = {f_norm_eps}")

        if (n_experts := self.hparams.get("num_local_experts")) is not None:
            self.gguf_writer.add_expert_count(n_experts)
            logger.info(f"gguf: expert count = {n_experts}")

        if (n_experts_used := self.hparams.get("num_experts_per_tok")) is not None:
            self.gguf_writer.add_expert_used_count(n_experts_used)
            logger.info(f"gguf: experts used count = {n_experts_used}")

        if (head_dim := self.hparams.get("head_dim")) is not None:
            self.gguf_writer.add_key_length(head_dim)
            self.gguf_writer.add_value_length(head_dim)

        self.gguf_writer.add_file_type(self.ftype)
        logger.info(f"gguf: file type = {self.ftype}")

    def write_vocab(self):
        if len(self.gguf_writer.tensors) != 1:
            raise ValueError('Splitting the vocabulary is not supported')

        self.prepare_metadata(vocab_only=True)
        self.gguf_writer.write_header_to_file(path=self.fname_out)
        self.gguf_writer.write_kv_data_to_file()
        self.gguf_writer.close()

    def does_token_look_special(self, token: str | bytes) -> bool:
        if isinstance(token, (bytes, bytearray)):
            token_text = token.decode(encoding="utf-8")
        elif isinstance(token, memoryview):
            token_text = token.tobytes().decode(encoding="utf-8")
        else:
            token_text = token

        # Some models mark some added tokens which ought to be control tokens as not special.
        # (e.g. command-r, command-r-plus, deepseek-coder, gemma{,-2})
        seems_special = token_text in (
            "<pad>",  # deepseek-coder
            "<mask>", "<2mass>", "[@BOS@]",  # gemma{,-2}
        )

        seems_special = seems_special or (token_text.startswith("<|") and token_text.endswith("|>"))
        seems_special = seems_special or (token_text.startswith("<｜") and token_text.endswith("｜>"))  # deepseek-coder

        # TODO: should these be marked as UNUSED instead? (maybe not)
        seems_special = seems_special or (token_text.startswith("<unused") and token_text.endswith(">"))  # gemma{,-2}

        return seems_special
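
    # e.g. "<|im_start|>", "<pad>" and gemma-style "<unused42>" all look special here,
    # while an ordinary vocabulary token such as "hello" does not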

    # used for GPT-2 BPE and WordPiece vocabs
    def get_vocab_base(self) -> tuple[list[str], list[int], str]:
        tokens: list[str] = []
        toktypes: list[int] = []

        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(self.dir_model)
        vocab_size = self.hparams.get("vocab_size", len(tokenizer.vocab))
        assert max(tokenizer.vocab.values()) < vocab_size

        tokpre = self.get_vocab_base_pre(tokenizer)

        reverse_vocab = {id_: encoded_tok for encoded_tok, id_ in tokenizer.vocab.items()}
        added_vocab = tokenizer.get_added_vocab()
        added_tokens_decoder = tokenizer.added_tokens_decoder

        for i in range(vocab_size):
            if i not in reverse_vocab:
                tokens.append(f"[PAD{i}]")
                toktypes.append(gguf.TokenType.UNUSED)
            else:
                token: str = reverse_vocab[i]
                if token in added_vocab:
                    # The tokenizer in llama.cpp assumes the CONTROL and USER_DEFINED tokens are pre-normalized.
                    # To avoid unexpected issues - we make sure to normalize non-normalized tokens
                    if not added_tokens_decoder[i].normalized:
                        previous_token = token
                        token = tokenizer.decode(tokenizer.encode(token, add_special_tokens=False))
                        if previous_token != token:
                            logger.info(f"{repr(previous_token)} is encoded and decoded back to {repr(token)} using AutoTokenizer")

                    if added_tokens_decoder[i].special or self.does_token_look_special(token):
                        toktypes.append(gguf.TokenType.CONTROL)
                    else:
                        # NOTE: this was added for Gemma.
                        # Encoding and decoding the tokens above isn't sufficient for this case.
                        token = token.replace(b"\xe2\x96\x81".decode("utf-8"), " ")  # pre-normalize user-defined spaces
                        toktypes.append(gguf.TokenType.USER_DEFINED)
                else:
                    toktypes.append(gguf.TokenType.NORMAL)
                tokens.append(token)

        return tokens, toktypes, tokpre

    # NOTE: this function is generated by convert_hf_to_gguf_update.py
    #       do not modify it manually!
    # ref:  https://github.com/ggml-org/llama.cpp/pull/6920
    # Marker: Start get_vocab_base_pre
    def get_vocab_base_pre(self, tokenizer) -> str:
        # encoding this string and hashing the resulting tokens would (hopefully) give us a unique identifier that
        # is specific for the BPE pre-tokenizer used by the model
        # we will use this unique identifier to write a "tokenizer.ggml.pre" entry in the GGUF file which we can
        # use in llama.cpp to implement the same pre-tokenizer

        chktxt = '\n \n\n \n\n\n \t \t\t \t\n  \n   \n    \n     \n🚀 (normal) 😶\u200d🌫️ (multiple emojis concatenated) ✅ 🦙🦙 3 33 333 3333 33333 333333 3333333 33333333 3.3 3..3 3...3 កាន់តែពិសេសអាច😁 ?我想在apple工作1314151天～ ------======= нещо на Български \'\'\'\'\'\'```````""""......!!!!!!?????? I\'ve been \'told he\'s there, \'RE you sure? \'M not sure I\'ll make it, \'D you like some tea? We\'Ve a\'lL'

        chktok = tokenizer.encode(chktxt)
        chkhsh = sha256(str(chktok).encode()).hexdigest()
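        # note: checkpoints that share the exact same pre-tokenizer (regex, merges, added tokens)
        # produce identical chkhsh values, which is why a few entries below share a hash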
  527. logger.debug(f"chktok: {chktok}")
  528. logger.debug(f"chkhsh: {chkhsh}")
  529. res = None
  530. # NOTE: if you get an error here, you need to update the convert_hf_to_gguf_update.py script
  531. # or pull the latest version of the model from Huggingface
  532. # don't edit the hashes manually!
  533. if chkhsh == "0ef9807a4087ebef797fc749390439009c3b9eda9ad1a097abbe738f486c01e5":
  534. # ref: https://huggingface.co/meta-llama/Meta-Llama-3-8B
  535. res = "llama-bpe"
  536. if chkhsh == "049ecf7629871e3041641907f3de7c733e4dbfdc736f57d882ba0b0845599754":
  537. # ref: https://huggingface.co/deepseek-ai/deepseek-llm-7b-base
  538. res = "deepseek-llm"
  539. if chkhsh == "347715f544604f9118bb75ed199f68779f423cabb20db6de6f31b908d04d7821":
  540. # ref: https://huggingface.co/deepseek-ai/deepseek-coder-6.7b-base
  541. res = "deepseek-coder"
  542. if chkhsh == "8aeee3860c56296a157a1fe2fad249ec40aa59b1bb5709f4ade11c4e6fe652ed":
  543. # ref: https://huggingface.co/tiiuae/falcon-7b
  544. res = "falcon"
  545. if chkhsh == "9d032fcbd5501f4a38150912590928bfb36091efb5df11b8e2124b0390e3fb1e":
  546. # ref: https://huggingface.co/tiiuae/Falcon3-7B-Base
  547. res = "falcon3"
  548. if chkhsh == "0876d13b50744004aa9aeae05e7b0647eac9d801b5ba4668afc01e709c15e19f":
  549. # ref: https://huggingface.co/BAAI/bge-small-en-v1.5
  550. res = "bert-bge"
  551. if chkhsh == "8e62295832751ca1e8f92f2226f403dea30dc5165e448b5bfa05af5340c64ec7":
  552. # ref: https://huggingface.co/BAAI/bge-large-zh-v1.5
  553. res = "bert-bge-large"
  554. if chkhsh == "b6dc8df998e1cfbdc4eac8243701a65afe638679230920b50d6f17d81c098166":
  555. # ref: https://huggingface.co/mosaicml/mpt-7b
  556. res = "mpt"
  557. if chkhsh == "35d91631860c815f952d711435f48d356ebac988362536bed955d43bfa436e34":
  558. # ref: https://huggingface.co/bigcode/starcoder2-3b
  559. res = "starcoder"
  560. if chkhsh == "3ce83efda5659b07b1ad37ca97ca5797ea4285d9b9ab0dc679e4a720c9da7454":
  561. # ref: https://huggingface.co/openai-community/gpt2
  562. res = "gpt-2"
  563. if chkhsh == "32d85c31273f8019248f2559fed492d929ea28b17e51d81d3bb36fff23ca72b3":
  564. # ref: https://huggingface.co/stabilityai/stablelm-2-zephyr-1_6b
  565. res = "stablelm2"
  566. if chkhsh == "6221ad2852e85ce96f791f476e0b390cf9b474c9e3d1362f53a24a06dc8220ff":
  567. # ref: https://huggingface.co/smallcloudai/Refact-1_6-base
  568. res = "refact"
  569. if chkhsh == "9c2227e4dd922002fb81bde4fc02b0483ca4f12911410dee2255e4987644e3f8":
  570. # ref: https://huggingface.co/CohereForAI/c4ai-command-r-v01
  571. res = "command-r"
  572. if chkhsh == "e636dc30a262dcc0d8c323492e32ae2b70728f4df7dfe9737d9f920a282b8aea":
            # ref: https://huggingface.co/Qwen/Qwen1.5-7B
            res = "qwen2"
        if chkhsh == "b6dc8df998e1cfbdc4eac8243701a65afe638679230920b50d6f17d81c098166":
            # ref: https://huggingface.co/allenai/OLMo-1.7-7B-hf
            res = "olmo"
        if chkhsh == "a8594e3edff7c29c003940395316294b2c623e09894deebbc65f33f1515df79e":
            # ref: https://huggingface.co/databricks/dbrx-base
            res = "dbrx"
        if chkhsh == "c7699093ba4255a91e702aa38a596aa81669f3525dae06c2953267dde580f448":
            # ref: https://huggingface.co/jinaai/jina-reranker-v1-tiny-en
            res = "jina-v1-en"
        if chkhsh == "0876d13b50744004aa9aeae05e7b0647eac9d801b5ba4668afc01e709c15e19f":
            # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-en
            res = "jina-v2-en"
        if chkhsh == "171aeeedd6fb548d418a7461d053f11b6f1f1fc9b387bd66640d28a4b9f5c643":
            # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-es
            res = "jina-v2-es"
        if chkhsh == "27949a2493fc4a9f53f5b9b029c82689cfbe5d3a1929bb25e043089e28466de6":
            # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-de
            res = "jina-v2-de"
        if chkhsh == "c136ed14d01c2745d4f60a9596ae66800e2b61fa45643e72436041855ad4089d":
            # ref: https://huggingface.co/abacusai/Smaug-Llama-3-70B-Instruct
            res = "smaug-bpe"
        if chkhsh == "c7ea5862a53e4272c035c8238367063e2b270d51faa48c0f09e9d5b54746c360":
            # ref: https://huggingface.co/LumiOpen/Poro-34B-chat
            res = "poro-chat"
        if chkhsh == "7967bfa498ade6b757b064f31e964dddbb80f8f9a4d68d4ba7998fcf281c531a":
            # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-code
            res = "jina-v2-code"
        if chkhsh == "b6e8e1518dc4305be2fe39c313ed643381c4da5db34a98f6a04c093f8afbe99b" or chkhsh == "81d72c7348a9f0ebe86f23298d37debe0a5e71149e29bd283904c02262b27516":
            # ref: https://huggingface.co/THUDM/glm-4-9b-chat
            res = "chatglm-bpe"
        if chkhsh == "7fc505bd3104ca1083b150b17d088b59534ede9bde81f0dd2090967d7fe52cee":
            # ref: https://huggingface.co/LumiOpen/Viking-7B
            res = "viking"
        if chkhsh == "b53802fb28e26d645c3a310b34bfe07da813026ec7c7716883404d5e0f8b1901":
            # ref: https://huggingface.co/core42/jais-13b
            res = "jais"
        if chkhsh == "7b3e7548e4308f52a76e8229e4e6cc831195d0d1df43aed21ac6c93da05fec5f":
            # ref: https://huggingface.co/WisdomShell/CodeShell-7B
            res = "codeshell"
        if chkhsh == "63b97e4253352e6f357cc59ea5b583e3a680eaeaf2632188c2b952de2588485e":
            # ref: https://huggingface.co/mistralai/Mistral-Nemo-Base-2407
            res = "tekken"
        if chkhsh == "855059429035d75a914d1eda9f10a876752e281a054a7a3d421ef0533e5b6249":
            # ref: https://huggingface.co/HuggingFaceTB/SmolLM-135M
            res = "smollm"
        if chkhsh == "3c30d3ad1d6b64202cd222813e7736c2db6e1bd6d67197090fc1211fbc612ae7":
            # ref: https://huggingface.co/bigscience/bloom
            res = "bloom"
        if chkhsh == "bc01ce58980e1db43859146dc51b1758b3b88729b217a74792e9f8d43e479d21":
            # ref: https://huggingface.co/TurkuNLP/gpt3-finnish-small
            res = "gpt3-finnish"
        if chkhsh == "4e2b24cc4770243d65a2c9ec19770a72f08cffc161adbb73fcbb6b7dd45a0aae":
            # ref: https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct
            res = "exaone"
        if chkhsh == "fcace8b9cac38ce847670c970cd5892031a753a1ef381abd1d9af00f713da085":
            # ref: https://huggingface.co/microsoft/phi-2
            res = "phi-2"
        if chkhsh == "60824e3c0d9401f89943cbb2fff727f0e2d4c545ba4df2d6e4f09a6db0f5b450":
            # ref: https://huggingface.co/facebook/chameleon-7b
            res = "chameleon"
        if chkhsh == "1431a23e583c97432bc230bff598d103ddb5a1f89960c8f1d1051aaa944d0b35":
            # ref: https://huggingface.co/sapienzanlp/Minerva-7B-base-v1.0
            res = "minerva-7b"
        if chkhsh == "8b5a93ed704057481f240da0be7e7dca721d7f8f4755263b6807227a2cbeae65":
            # ref: https://huggingface.co/sentence-transformers/stsb-roberta-base
            res = "roberta-bpe"
        if chkhsh == "ad851be1dba641f2e3711822f816db2c265f788b37c63b4e1aeacb9ee92de8eb":
            # ref: https://huggingface.co/ai-sage/GigaChat-20B-A3B-instruct
            res = "gigachat"
        if chkhsh == "d4c8f286ea6b520b3d495c4455483cfa2302c0cfcd4be05d781b6a8a0a7cdaf1":
            # ref: https://huggingface.co/Infinigence/Megrez-3B-Instruct
            res = "megrez"
        if chkhsh == "877081d19cf6996e2c4ff0e1236341e9b7bde288f5311a56a937f0afbbb3aeb5":
            # ref: https://huggingface.co/deepseek-ai/DeepSeek-V3
            res = "deepseek-v3"
        if chkhsh == "b3f499bb4255f8ca19fccd664443283318f2fd2414d5e0b040fbdd0cc195d6c5":
            # ref: https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B
            res = "deepseek-r1-qwen"
        if chkhsh == "ccc2ef013c104be7bae2965776d611e1d7a8a2a9c547dd93a682c9a9fc80352e":
            # ref: https://huggingface.co/Xenova/gpt-4o
            res = "gpt-4o"
        if chkhsh == "7dec86086fcc38b66b7bc1575a160ae21cf705be7718b9d5598190d7c12db76f":
            # ref: https://huggingface.co/UW/OLMo2-8B-SuperBPE-t180k
            res = "superbpe"
        if chkhsh == "1994ffd01900cfb37395608534236ecd63f2bd5995d6cb1004dda1af50240f15":
            # ref: https://huggingface.co/trillionlabs/Trillion-7B-preview
            res = "trillion"
        if chkhsh == "96a5f08be6259352137b512d4157e333e21df7edd3fcd152990608735a65b224":
            # ref: https://huggingface.co/inclusionAI/Ling-lite
            res = "bailingmoe"
        if chkhsh == "d353350c764d8c3b39c763113960e4fb4919bea5fbf208a0e3b22e8469dc7406":
            # ref: https://huggingface.co/meta-llama/Llama-4-Scout-17B-16E-Instruct
            res = "llama4"
        if chkhsh == "a1336059768a55c99a734006ffb02203cd450fed003e9a71886c88acf24fdbc2":
            # ref: https://huggingface.co/THUDM/glm-4-9b-hf
            res = "glm4"
        if chkhsh == "0e9433cbbb161f89e264eb32e8e64bfe69e834973ffca5d41d3948a604a3e2a3":
            # ref: https://huggingface.co/mistral-community/pixtral-12b
            res = "pixtral"
        if res is None:
            logger.warning("\n")
            logger.warning("**************************************************************************************")
            logger.warning("** WARNING: The BPE pre-tokenizer was not recognized!")
            logger.warning("**          There are 2 possible reasons for this:")
            logger.warning("**          - the model has not been added to convert_hf_to_gguf_update.py yet")
            logger.warning("**          - the pre-tokenization config has changed upstream")
            logger.warning("**          Check your model files and convert_hf_to_gguf_update.py and update them accordingly.")
            logger.warning("** ref:     https://github.com/ggml-org/llama.cpp/pull/6920")
            logger.warning("**")
            logger.warning(f"** chkhsh:  {chkhsh}")
            logger.warning("**************************************************************************************")
            logger.warning("\n")
            raise NotImplementedError("BPE pre-tokenizer was not recognized - update get_vocab_base_pre()")

        logger.debug(f"tokenizer.ggml.pre: {repr(res)}")
        logger.debug(f"chkhsh: {chkhsh}")

        return res
        # Marker: End get_vocab_base_pre

    def _set_vocab_none(self) -> None:
        self.gguf_writer.add_tokenizer_model("none")

    def _set_vocab_gpt2(self) -> None:
        tokens, toktypes, tokpre = self.get_vocab_base()
        self.gguf_writer.add_tokenizer_model("gpt2")
        self.gguf_writer.add_tokenizer_pre(tokpre)
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=True)
        special_vocab.add_to_gguf(self.gguf_writer)

    def _set_vocab_qwen(self):
        dir_model = self.dir_model
        hparams = self.hparams
        tokens: list[str] = []
        toktypes: list[int] = []

        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(dir_model, trust_remote_code=True)
        vocab_size = hparams["vocab_size"]
        assert max(tokenizer.get_vocab().values()) < vocab_size

        tokpre = self.get_vocab_base_pre(tokenizer)

        merges = []
        vocab = {}
        mergeable_ranks = tokenizer.mergeable_ranks
        for token, rank in mergeable_ranks.items():
            vocab[QwenModel.token_bytes_to_string(token)] = rank
            if len(token) == 1:
                continue
            merged = QwenModel.bpe(mergeable_ranks, token, max_rank=rank)
            assert len(merged) == 2
            merges.append(' '.join(map(QwenModel.token_bytes_to_string, merged)))
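
        # Note: tiktoken-style tokenizers ship merge ranks rather than merge rules,
        # so the merges are reconstructed above: re-running BPE on each multi-byte
        # token while only allowing merges of rank < max_rank leaves exactly the
        # final pair that formed it (illustratively, b"th" yields the merge "t h").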

        # for this kind of tokenizer, added_vocab is not a subset of vocab, so they need to be combined
        added_vocab = tokenizer.special_tokens
        reverse_vocab = {id_ : encoded_tok for encoded_tok, id_ in {**vocab, **added_vocab}.items()}

        for i in range(vocab_size):
            if i not in reverse_vocab:
                tokens.append(f"[PAD{i}]")
                toktypes.append(gguf.TokenType.UNUSED)
            elif reverse_vocab[i] in added_vocab:
                tokens.append(reverse_vocab[i])
                toktypes.append(gguf.TokenType.CONTROL)
            else:
                tokens.append(reverse_vocab[i])
                toktypes.append(gguf.TokenType.NORMAL)

        self.gguf_writer.add_tokenizer_model("gpt2")
        self.gguf_writer.add_tokenizer_pre(tokpre)
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(dir_model, load_merges=False)
        special_vocab.merges = merges
        # only add special tokens when they were not already loaded from config.json
        if len(special_vocab.special_token_ids) == 0:
            special_vocab._set_special_token("bos", tokenizer.special_tokens["<|endoftext|>"])
            special_vocab._set_special_token("eos", tokenizer.special_tokens["<|endoftext|>"])
        # this one is usually not in config.json anyway
        special_vocab._set_special_token("unk", tokenizer.special_tokens["<|endoftext|>"])
        special_vocab.add_to_gguf(self.gguf_writer)

    def _set_vocab_sentencepiece(self, add_to_gguf=True):
        tokens, scores, toktypes = self._create_vocab_sentencepiece()

        self.gguf_writer.add_tokenizer_model("llama")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

    def _create_vocab_sentencepiece(self):
        from sentencepiece import SentencePieceProcessor

        tokenizer_path = self.dir_model / 'tokenizer.model'

        if not tokenizer_path.is_file():
            raise FileNotFoundError(f"File not found: {tokenizer_path}")

        tokenizer = SentencePieceProcessor()
        tokenizer.LoadFromFile(str(tokenizer_path))

        vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())

        tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
        scores: list[float] = [-10000.0] * vocab_size
        toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size

        for token_id in range(tokenizer.vocab_size()):
            piece = tokenizer.IdToPiece(token_id)
            text = piece.encode("utf-8")
            score = tokenizer.GetScore(token_id)

            toktype = SentencePieceTokenTypes.NORMAL
            if tokenizer.IsUnknown(token_id):
                toktype = SentencePieceTokenTypes.UNKNOWN
            elif tokenizer.IsControl(token_id):
                toktype = SentencePieceTokenTypes.CONTROL
            elif tokenizer.IsUnused(token_id):
                toktype = SentencePieceTokenTypes.UNUSED
            elif tokenizer.IsByte(token_id):
                toktype = SentencePieceTokenTypes.BYTE

            tokens[token_id] = text
            scores[token_id] = score
            toktypes[token_id] = toktype
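
        # Note: slots beyond the base sentencepiece model can be overridden below by
        # added_tokens.json and tokenizer_config.json's added_tokens_decoder; those
        # overlays get a fixed low score (-1000.0) and USER_DEFINED/CONTROL types.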

        added_tokens_file = self.dir_model / 'added_tokens.json'
        if added_tokens_file.is_file():
            with open(added_tokens_file, "r", encoding="utf-8") as f:
                added_tokens_json = json.load(f)
                for key in added_tokens_json:
                    token_id = added_tokens_json[key]
                    if token_id >= vocab_size:
                        logger.warning(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
                        continue

                    tokens[token_id] = key.encode("utf-8")
                    scores[token_id] = -1000.0
                    toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED

        tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
        if tokenizer_config_file.is_file():
            with open(tokenizer_config_file, "r", encoding="utf-8") as f:
                tokenizer_config_json = json.load(f)
                added_tokens_decoder = tokenizer_config_json.get("added_tokens_decoder", {})
                for token_id, token_data in added_tokens_decoder.items():
                    token_id = int(token_id)
                    token: str = token_data["content"]
                    if token_id >= vocab_size:
                        logger.warning(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
                        continue
                    if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
                        if tokens[token_id] != token.encode("utf-8"):
                            logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token!r}')
                    if token_data.get("special") or self.does_token_look_special(token):
                        toktypes[token_id] = SentencePieceTokenTypes.CONTROL
                    else:
                        token = token.replace(b"\xe2\x96\x81".decode("utf-8"), " ")  # pre-normalize user-defined spaces
                        toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED

                    scores[token_id] = -1000.0
                    tokens[token_id] = token.encode("utf-8")

        if vocab_size > len(tokens):
            pad_count = vocab_size - len(tokens)
            logger.debug(f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]")
            for i in range(1, pad_count + 1):
                tokens.append(bytes(f"[PAD{i}]", encoding="utf-8"))
                scores.append(-1000.0)
                toktypes.append(SentencePieceTokenTypes.UNUSED)

        return tokens, scores, toktypes

    def _set_vocab_llama_hf(self):
        vocab = gguf.LlamaHfVocab(self.dir_model)
        tokens = []
        scores = []
        toktypes = []

        for text, score, toktype in vocab.all_tokens():
            tokens.append(text)
            scores.append(score)
            toktypes.append(toktype)

        assert len(tokens) == vocab.vocab_size

        self.gguf_writer.add_tokenizer_model("llama")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

    def _set_vocab_rwkv_world(self):
        assert (self.dir_model / "rwkv_vocab_v20230424.txt").is_file()
        vocab_size = self.hparams.get("vocab_size", 65536)

        tokens: list[bytes] = ['<s>'.encode("utf-8")]
        toktypes: list[int] = [gguf.TokenType.CONTROL]

        with open(self.dir_model / "rwkv_vocab_v20230424.txt", "r", encoding="utf-8") as f:
            lines = f.readlines()
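            # Each vocab line appears to have the form `<id> <token-literal> <byte-length>`,
            # where the token literal is a Python repr (str or bytes), e.g. illustratively
            # `33 '!' 1`; hence the ast.literal_eval below, with the trailing byte
            # length used as a sanity check.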
            for line in lines:
                parts = line.split(' ')
                assert len(parts) >= 3
                token, token_len = ast.literal_eval(' '.join(parts[1:-1])), int(parts[-1])
                token = token.encode("utf-8") if isinstance(token, str) else token
                assert isinstance(token, bytes)
                assert len(token) == token_len
                token_text: str = repr(token)[2:-1]  # "b'\xff'" -> "\xff"
                tokens.append(token_text.encode("utf-8"))
                toktypes.append(gguf.TokenType.NORMAL)

        remainder = vocab_size - len(tokens)
        assert remainder >= 0
        for i in range(len(tokens), vocab_size):
            tokens.append(f"[PAD{i}]".encode("utf-8"))
            toktypes.append(gguf.TokenType.UNUSED)

        self.gguf_writer.add_tokenizer_model("rwkv")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)
        special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=False)
        special_vocab.chat_template = "rwkv-world"
        # hack: Add '\n\n' as the EOT token to make it chat normally
        special_vocab._set_special_token("eot", 261)
        special_vocab.add_to_gguf(self.gguf_writer)

    def _set_vocab_builtin(self, model_name: Literal["gpt-neox", "llama-spm"], vocab_size: int):
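        # Note: instead of reading tokenizer files from the model directory, this
        # copies a ready-made vocab out of one of the reference GGUF vocab files
        # that ship with the llama.cpp repo (models/ggml-vocab-*.gguf).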
        tokenizer_path = Path(sys.path[0]) / "models" / f"ggml-vocab-{model_name}.gguf"
        logger.warning(f"Using tokenizer from '{os.path.relpath(tokenizer_path, os.getcwd())}'")
        vocab_reader = gguf.GGUFReader(tokenizer_path, "r")

        default_pre = "mpt" if model_name == "gpt-neox" else "default"

        field = vocab_reader.get_field(gguf.Keys.Tokenizer.MODEL)
        assert field  # tokenizer model
        self.gguf_writer.add_tokenizer_model(bytes(field.parts[-1]).decode("utf-8"))

        field = vocab_reader.get_field(gguf.Keys.Tokenizer.PRE)
        self.gguf_writer.add_tokenizer_pre(bytes(field.parts[-1]).decode("utf-8") if field else default_pre)

        field = vocab_reader.get_field(gguf.Keys.Tokenizer.LIST)
        assert field  # token list
        self.gguf_writer.add_token_list([bytes(field.parts[i]) for i in field.data][:vocab_size])

        if model_name == "llama-spm":
            field = vocab_reader.get_field(gguf.Keys.Tokenizer.SCORES)
            assert field  # token scores
            self.gguf_writer.add_token_scores([field.parts[i].tolist()[0] for i in field.data][:vocab_size])

        field = vocab_reader.get_field(gguf.Keys.Tokenizer.TOKEN_TYPE)
        assert field  # token types
        self.gguf_writer.add_token_types([field.parts[i].tolist()[0] for i in field.data][:vocab_size])

        if model_name != "llama-spm":
            field = vocab_reader.get_field(gguf.Keys.Tokenizer.MERGES)
            assert field  # token merges
            self.gguf_writer.add_token_merges([bytes(field.parts[i]) for i in field.data])

        if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.BOS_ID)) is not None:
            self.gguf_writer.add_bos_token_id(field.parts[-1].tolist()[0])
        if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.EOS_ID)) is not None:
            self.gguf_writer.add_eos_token_id(field.parts[-1].tolist()[0])
        if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.UNK_ID)) is not None:
            self.gguf_writer.add_unk_token_id(field.parts[-1].tolist()[0])
        if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.PAD_ID)) is not None:
            self.gguf_writer.add_pad_token_id(field.parts[-1].tolist()[0])
        if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.ADD_BOS)) is not None:
            self.gguf_writer.add_add_bos_token(field.parts[-1].tolist()[0])
        if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.ADD_EOS)) is not None:
            self.gguf_writer.add_add_eos_token(field.parts[-1].tolist()[0])

    def _try_set_pooling_type(self) -> None:
        # get pooling path
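        # (sentence-transformers models describe their module pipeline in modules.json;
        # the Pooling entry's "path" points at a subdirectory whose config.json holds
        # the pooling-mode flags read below)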
        pooling_path = None
        module_path = self.dir_model / "modules.json"
        if module_path.is_file():
            with open(module_path, encoding="utf-8") as f:
                modules = json.load(f)
                for mod in modules:
                    if mod["type"] == "sentence_transformers.models.Pooling":
                        pooling_path = mod["path"]
                        break

        # get pooling type
        if pooling_path is not None:
            with open(self.dir_model / pooling_path / "config.json", encoding="utf-8") as f:
                pooling = json.load(f)
            if pooling["pooling_mode_mean_tokens"]:
                pooling_type = gguf.PoolingType.MEAN
            elif pooling["pooling_mode_cls_token"]:
                pooling_type = gguf.PoolingType.CLS
            elif pooling["pooling_mode_lasttoken"]:
                pooling_type = gguf.PoolingType.LAST
            else:
                raise NotImplementedError("Only MEAN, CLS, and LAST pooling types supported")
            self.gguf_writer.add_pooling_type(pooling_type)


class VisionModel(ModelBase):
    model_type = ModelType.VISION
    model_arch = gguf.MODEL_ARCH.CLIP_VISION
    preprocessor_config: dict[str, Any]
    global_config: dict[str, Any]

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.model_arch != gguf.MODEL_ARCH.CLIP_VISION:
            raise TypeError("VisionModel must be subclassed with model_arch = gguf.MODEL_ARCH.CLIP_VISION")

        # get n_embd of the text model
        if "text_config" not in self.hparams:
            self.hparams["text_config"] = {}
        text_config = {**self.hparams, **self.hparams["text_config"]}
        self.n_embd_text = text_config.get("hidden_size", text_config.get("n_embd", 0))
        assert self.n_embd_text > 0, "n_embd not found in hparams"

        if "vision_config" not in self.hparams:
            raise ValueError("vision_config not found in hparams")
        # move vision config to the top level, while preserving the original hparams in global_config
        self.global_config = self.hparams
        self.hparams = self.hparams["vision_config"]

        self.block_count = self.find_hparam(["n_layers", "num_hidden_layers", "n_layer", "num_layers", "depth"])
        self.tensor_map = gguf.get_tensor_name_map(gguf.MODEL_ARCH.CLIP_VISION, self.block_count)

        # load preprocessor config
        with open(self.dir_model / "preprocessor_config.json", "r", encoding="utf-8") as f:
            self.preprocessor_config = json.load(f)

    def set_type(self):
        self.gguf_writer.add_type(gguf.GGUFType.CLIP_VISION)

    def set_gguf_parameters(self):
        self.gguf_writer.add_file_type(self.ftype)
        self.gguf_writer.add_vision_projection_dim(self.n_embd_text)
        self.gguf_writer.add_vision_has_vision_encoder(True)

        # vision config
        self.gguf_writer.add_vision_image_size(self.find_hparam(["image_size"]))
        self.gguf_writer.add_vision_patch_size(self.find_hparam(["patch_size"]))
        self.gguf_writer.add_vision_embedding_length(self.find_hparam(["hidden_size"]))
        self.gguf_writer.add_vision_feed_forward_length(self.find_hparam(["intermediate_size"]))
        self.gguf_writer.add_vision_block_count(self.block_count)
        self.gguf_writer.add_vision_head_count(self.find_hparam(["num_attention_heads"]))

        # preprocessor config
        self.gguf_writer.add_vision_image_mean(self.preprocessor_config["image_mean"])
        self.gguf_writer.add_vision_image_std(self.preprocessor_config["image_std"])

    def write_vocab(self):
        raise ValueError("VisionModel does not support vocab writing")


@ModelBase.register("GPTNeoXForCausalLM")
class GPTNeoXModel(TextModel):
    model_arch = gguf.MODEL_ARCH.GPTNEOX

    def set_gguf_parameters(self):
        block_count = self.hparams["num_hidden_layers"]

        self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"])
        self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
        self.gguf_writer.add_rope_dimension_count(
            int(self.hparams["rotary_pct"] * (self.hparams["hidden_size"] // self.hparams["num_attention_heads"])),
        )
        self.gguf_writer.add_head_count(self.hparams["num_attention_heads"])
        self.gguf_writer.add_parallel_residual(self.hparams.get("use_parallel_residual", True))
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_eps"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads"))
        n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed"))

        tensors: list[tuple[str, Tensor]] = []

        if re.match(r"gpt_neox\.layers\.\d+\.attention\.query_key_value\.weight", name):
            # Map bloom-style qkv_linear to gpt-style qkv_linear
            # bloom: https://github.com/huggingface/transformers/blob/main/src/transformers/models/bloom/modeling_bloom.py#L238-L252  # noqa
            # gpt-2: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt2/modeling_gpt2.py#L312  # noqa
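            # The fused qkv weight interleaves per-head [q, k, v] blocks; reshape to
            # (n_head, 3, head_dim, n_embed) and re-concatenate so all q rows come
            # first, then all k, then all v. Illustratively, with n_head=2, n_embed=8:
            # (24, 8) -> (2, 3, 4, 8) -> cat of three (8, 8) slabs.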
            qkv_weights = data_torch.reshape((n_head, 3, n_embed // n_head, n_embed))
            data_torch = torch.cat(
                (
                    qkv_weights[:, 0, :, :].reshape((-1, n_embed)),
                    qkv_weights[:, 1, :, :].reshape((-1, n_embed)),
                    qkv_weights[:, 2, :, :].reshape((-1, n_embed)),
                ),
                dim=0,
            )
            logger.info("re-format attention.linear_qkv.weight")
        elif re.match(r"gpt_neox\.layers\.\d+\.attention\.query_key_value\.bias", name):
            qkv_bias = data_torch.reshape((n_head, 3, n_embed // n_head))
            data_torch = torch.cat(
                (
                    qkv_bias[:, 0, :].reshape((n_embed,)),
                    qkv_bias[:, 1, :].reshape((n_embed,)),
                    qkv_bias[:, 2, :].reshape((n_embed,)),
                ),
                dim=0,
            )
            logger.info("re-format attention.linear_qkv.bias")

        tensors.append((self.map_tensor_name(name), data_torch))

        return tensors


@ModelBase.register("BloomForCausalLM", "BloomModel")
class BloomModel(TextModel):
    model_arch = gguf.MODEL_ARCH.BLOOM

    def set_gguf_parameters(self):
        n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed"))
        n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads"))
        self.gguf_writer.add_context_length(self.hparams.get("seq_length", n_embed))
        self.gguf_writer.add_embedding_length(n_embed)
        self.gguf_writer.add_feed_forward_length(4 * n_embed)
        self.gguf_writer.add_block_count(self.hparams["n_layer"])
        self.gguf_writer.add_head_count(n_head)
        self.gguf_writer.add_head_count_kv(n_head)
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads"))
        n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed"))

        name = re.sub(r'transformer\.', '', name)

        tensors: list[tuple[str, Tensor]] = []

        if re.match(r"h\.\d+\.self_attention\.query_key_value\.weight", name):
            # Map bloom-style qkv_linear to gpt-style qkv_linear
            # bloom: https://github.com/huggingface/transformers/blob/main/src/transformers/models/bloom/modeling_bloom.py#L238-L252  # noqa
            # gpt-2: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt2/modeling_gpt2.py#L312  # noqa
            qkv_weights = data_torch.reshape((n_head, 3, n_embed // n_head, n_embed))
            data_torch = torch.cat(
                (
                    qkv_weights[:, 0, :, :].reshape((-1, n_embed)),
                    qkv_weights[:, 1, :, :].reshape((-1, n_embed)),
                    qkv_weights[:, 2, :, :].reshape((-1, n_embed)),
                ),
                dim=0,
            )
            logger.info("re-format attention.linear_qkv.weight")
        elif re.match(r"h\.\d+\.self_attention\.query_key_value\.bias", name):
            qkv_bias = data_torch.reshape((n_head, 3, n_embed // n_head))
            data_torch = torch.cat(
                (
                    qkv_bias[:, 0, :].reshape((n_embed,)),
                    qkv_bias[:, 1, :].reshape((n_embed,)),
                    qkv_bias[:, 2, :].reshape((n_embed,)),
                ),
                dim=0,
            )
            logger.info("re-format attention.linear_qkv.bias")

        tensors.append((self.map_tensor_name(name), data_torch))

        return tensors


@ModelBase.register("MPTForCausalLM")
class MPTModel(TextModel):
    model_arch = gguf.MODEL_ARCH.MPT

    def set_vocab(self):
        try:
            self._set_vocab_gpt2()
        except Exception:
            # Fallback for SEA-LION model
            self._set_vocab_sentencepiece()
            self.gguf_writer.add_add_bos_token(False)
            self.gguf_writer.add_pad_token_id(3)
            self.gguf_writer.add_eos_token_id(1)
            self.gguf_writer.add_unk_token_id(0)

    def set_gguf_parameters(self):
        block_count = self.hparams["n_layers"]
        self.gguf_writer.add_context_length(self.hparams["max_seq_len"])
        self.gguf_writer.add_embedding_length(self.hparams["d_model"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_feed_forward_length(4 * self.hparams["d_model"])
        self.gguf_writer.add_head_count(self.hparams["n_heads"])
        if kv_n_heads := self.hparams["attn_config"].get("kv_n_heads"):
            self.gguf_writer.add_head_count_kv(kv_n_heads)
        self.gguf_writer.add_layer_norm_eps(1e-5)
        if self.hparams["attn_config"]["clip_qkv"] is not None:
            self.gguf_writer.add_clamp_kqv(self.hparams["attn_config"]["clip_qkv"])
        if self.hparams["attn_config"]["alibi"]:
            self.gguf_writer.add_max_alibi_bias(self.hparams["attn_config"]["alibi_bias_max"])
        else:
            self.gguf_writer.add_max_alibi_bias(0.0)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        if "scales" in name:
            new_name = self.map_tensor_name(name, try_suffixes=(".weight", ".bias", ".scales"))
            new_name = new_name.replace("scales", "act.scales")
        else:
            new_name = self.map_tensor_name(name, try_suffixes=(".weight", ".bias"))

        return [(new_name, data_torch)]


@ModelBase.register("OrionForCausalLM")
class OrionModel(TextModel):
    model_arch = gguf.MODEL_ARCH.ORION

    def set_vocab(self):
        self._set_vocab_sentencepiece()

    def set_gguf_parameters(self):
        block_count = self.hparams["num_hidden_layers"]
        head_count = self.hparams["num_attention_heads"]
        head_count_kv = self.hparams.get("num_key_value_heads", head_count)

        ctx_length = 0
        if "max_sequence_length" in self.hparams:
            ctx_length = self.hparams["max_sequence_length"]
        elif "max_position_embeddings" in self.hparams:
            ctx_length = self.hparams["max_position_embeddings"]
        elif "model_max_length" in self.hparams:
            ctx_length = self.hparams["model_max_length"]
        else:
            raise ValueError("gguf: cannot find ctx length parameter.")

        self.gguf_writer.add_file_type(self.ftype)
        self.gguf_writer.add_tensor_data_layout("Meta AI original pth")
        self.gguf_writer.add_context_length(ctx_length)
        self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
        self.gguf_writer.add_head_count(head_count)
        self.gguf_writer.add_head_count_kv(head_count_kv)
        # note: config provides rms norm but it is actually layer norm
        # ref: https://huggingface.co/OrionStarAI/Orion-14B-Chat/blob/276a17221ce42beb45f66fac657a41540e71f4f5/modeling_orion.py#L570-L571
        self.gguf_writer.add_layer_norm_eps(self.hparams["rms_norm_eps"])


@ModelBase.register("BaichuanForCausalLM", "BaiChuanForCausalLM")
class BaichuanModel(TextModel):
    model_arch = gguf.MODEL_ARCH.BAICHUAN

    def set_vocab(self):
        self._set_vocab_sentencepiece()

    def set_gguf_parameters(self):
        block_count = self.hparams["num_hidden_layers"]
        head_count = self.hparams["num_attention_heads"]
        head_count_kv = self.hparams.get("num_key_value_heads", head_count)

        ctx_length = 0
        if "max_sequence_length" in self.hparams:
            ctx_length = self.hparams["max_sequence_length"]
        elif "max_position_embeddings" in self.hparams:
            ctx_length = self.hparams["max_position_embeddings"]
        elif "model_max_length" in self.hparams:
            ctx_length = self.hparams["model_max_length"]
        else:
            raise ValueError("gguf: cannot find ctx length parameter.")

        self.gguf_writer.add_tensor_data_layout("Meta AI original pth")
        self.gguf_writer.add_context_length(ctx_length)
        self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
        self.gguf_writer.add_rope_dimension_count(self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
        self.gguf_writer.add_head_count(head_count)
        self.gguf_writer.add_head_count_kv(head_count_kv)
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
        self.gguf_writer.add_file_type(self.ftype)

        if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]:
            if self.hparams["rope_scaling"].get("type") == "linear":
                self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
                self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        head_count = self.hparams["num_attention_heads"]
        head_count_kv = self.hparams.get("num_key_value_heads", head_count)

        tensors: list[tuple[str, Tensor]] = []

        if bid is not None and name == f"model.layers.{bid}.self_attn.W_pack.weight":
            logger.info(f"Unpacking and permuting layer {bid}")
            tensors = [
                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid),
                    self._reverse_hf_permute_part(data_torch, 0, head_count, head_count)),
                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid),
                    self._reverse_hf_permute_part(data_torch, 1, head_count, head_count_kv)),
                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid),
                    self._reverse_hf_part(data_torch, 2)),
            ]
        else:
            tensors = [(self.map_tensor_name(name), data_torch)]

        return tensors
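
    # Note: _reverse_hf_permute undoes the Q/K permutation applied by the original
    # HF Llama conversion (rotary halves stored as two contiguous blocks), restoring
    # the interleaved even/odd rotary-dimension layout that llama.cpp expects; the
    # reshape/swapaxes round-trip is its own inverse.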
    def _reverse_hf_permute(self, weights: Tensor, n_head: int, n_kv_head: int | None = None) -> Tensor:
        if n_kv_head is not None and n_head != n_kv_head:
            n_head //= n_kv_head

        return (
            weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
            .swapaxes(1, 2)
            .reshape(weights.shape)
        )

    def _reverse_hf_permute_part(
        self, weights: Tensor, n_part: int, n_head: int, n_head_kv: int | None = None,
    ) -> Tensor:
        r = weights.shape[0] // 3
        return self._reverse_hf_permute(weights[r * n_part:r * n_part + r, ...], n_head, n_head_kv)

    def _reverse_hf_part(self, weights: Tensor, n_part: int) -> Tensor:
        r = weights.shape[0] // 3
        return weights[r * n_part:r * n_part + r, ...]


@ModelBase.register("XverseForCausalLM")
class XverseModel(TextModel):
    model_arch = gguf.MODEL_ARCH.XVERSE

    def set_vocab(self):
        assert (self.dir_model / "tokenizer.json").is_file()
        dir_model = self.dir_model
        hparams = self.hparams

        tokens: list[bytes] = []
        toktypes: list[int] = []

        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(dir_model)
        vocab_size = hparams.get("vocab_size", len(tokenizer.vocab))
        # Since we are checking the maximum index, we need to ensure it's strictly less than vocab_size,
        # because vocab_size is the count of items, and indexes start at 0.
        max_vocab_index = max(tokenizer.get_vocab().values())
        if max_vocab_index >= vocab_size:
            raise ValueError("Vocabulary size exceeds expected maximum size.")

        reverse_vocab: dict[int, str] = {id_: encoded_tok for encoded_tok, id_ in tokenizer.vocab.items()}
        added_vocab = tokenizer.get_added_vocab()

        for token_id in range(vocab_size):
            token_text = reverse_vocab[token_id].encode('utf-8')
            # replace "\x00" with a string of length > 0
            if token_text == b"\x00":
                toktype = gguf.TokenType.BYTE  # special
                token_text = f"<{token_text}>".encode('utf-8')
            elif re.fullmatch(br"<0x[0-9A-Fa-f]{2}>", token_text):
                toktype = gguf.TokenType.BYTE  # special
            elif reverse_vocab[token_id] in added_vocab:
                if tokenizer.added_tokens_decoder[token_id].special:
                    toktype = gguf.TokenType.CONTROL
                else:
                    toktype = gguf.TokenType.USER_DEFINED
            else:
                toktype = gguf.TokenType.NORMAL

            tokens.append(token_text)
            toktypes.append(toktype)

        self.gguf_writer.add_tokenizer_model("llama")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        block_count = self.hparams["num_hidden_layers"]
        head_count = self.hparams["num_attention_heads"]
        head_count_kv = self.hparams.get("num_key_value_heads", head_count)

        ctx_length = 0
        if "max_sequence_length" in self.hparams:
            ctx_length = self.hparams["max_sequence_length"]
        elif "max_position_embeddings" in self.hparams:
            ctx_length = self.hparams["max_position_embeddings"]
        elif "model_max_length" in self.hparams:
            ctx_length = self.hparams["model_max_length"]
        else:
            raise ValueError("gguf: cannot find ctx length parameter.")

        self.gguf_writer.add_tensor_data_layout("Meta AI original pth")
        self.gguf_writer.add_context_length(ctx_length)
        self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
        self.gguf_writer.add_rope_dimension_count(self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
        self.gguf_writer.add_head_count(head_count)
        self.gguf_writer.add_head_count_kv(head_count_kv)
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
        self.gguf_writer.add_file_type(self.ftype)

        if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]:
            if self.hparams["rope_scaling"].get("type") == "linear":
                self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
                self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        head_count = self.hparams["num_attention_heads"]
        head_count_kv = self.hparams.get("num_key_value_heads", head_count)

        # HF models permute some of the tensors, so we need to undo that
        if name.endswith("q_proj.weight"):
            data_torch = self._reverse_hf_permute(data_torch, head_count, head_count)
        if name.endswith("k_proj.weight"):
            data_torch = self._reverse_hf_permute(data_torch, head_count, head_count_kv)

        return [(self.map_tensor_name(name), data_torch)]

    def _reverse_hf_permute(self, weights: Tensor, n_head: int, n_kv_head: int | None = None) -> Tensor:
        if n_kv_head is not None and n_head != n_kv_head:
            n_head //= n_kv_head

        return (
            weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
            .swapaxes(1, 2)
            .reshape(weights.shape)
        )


@ModelBase.register("FalconForCausalLM", "RWForCausalLM")
class FalconModel(TextModel):
    model_arch = gguf.MODEL_ARCH.FALCON

    def set_gguf_parameters(self):
        block_count = self.hparams.get("num_hidden_layers")
        if block_count is None:
            block_count = self.hparams["n_layer"]  # old name

        n_head = self.hparams.get("num_attention_heads")
        if n_head is None:
            n_head = self.hparams["n_head"]  # old name

        n_head_kv = self.hparams.get("num_kv_heads")
        if n_head_kv is None:
            n_head_kv = self.hparams.get("n_head_kv", 1)  # old name

        self.gguf_writer.add_context_length(2048)  # not in config.json
        self.gguf_writer.add_tensor_data_layout("jploski")  # qkv tensor transform
        self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
        self.gguf_writer.add_feed_forward_length(4 * self.hparams["hidden_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_head_count(n_head)
        self.gguf_writer.add_head_count_kv(n_head_kv)
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        # QKV tensor transform
        # The original query_key_value tensor contains n_head_kv "kv groups",
        # each consisting of n_head/n_head_kv query weights followed by one key
        # and one value weight (shared by all query heads in the kv group).
        # This layout makes it a big pain to work with in GGML.
        # So we rearrange them here, so that we have n_head query weights
        # followed by n_head_kv key weights followed by n_head_kv value weights,
        # in contiguous fashion.
        # ref: https://github.com/jploski/ggml/blob/falcon40b/examples/falcon/convert-hf-to-ggml.py
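        # Illustratively, with n_head = 4, n_head_kv = 2, head_dim = d: the fused
        # tensor is viewed as (2, 4 // 2 + 2, d, 4 * d), i.e. two kv groups of
        # [q, q, k, v] row blocks; dropping the last two blocks of every group
        # yields q (4*d rows), while the [-2] and [-1] slices yield k and v
        # (2*d rows each), concatenated in that order.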
  1320. if "query_key_value" in name:
  1321. n_head = self.find_hparam(["num_attention_heads", "n_head"])
  1322. n_head_kv = self.find_hparam(["num_kv_heads", "n_head_kv"], optional=True) or 1
  1323. head_dim = self.hparams["hidden_size"] // n_head
  1324. qkv = data_torch.view(n_head_kv, n_head // n_head_kv + 2, head_dim, head_dim * n_head)
  1325. q = qkv[:, :-2].reshape(n_head * head_dim, head_dim * n_head)
  1326. k = qkv[:, [-2]].reshape(n_head_kv * head_dim, head_dim * n_head)
  1327. v = qkv[:, [-1]].reshape(n_head_kv * head_dim, head_dim * n_head)
  1328. data_torch = torch.cat((q, k, v)).reshape_as(data_torch)
  1329. return [(self.map_tensor_name(name), data_torch)]
  1330. @ModelBase.register("GPTBigCodeForCausalLM")
  1331. class StarCoderModel(TextModel):
  1332. model_arch = gguf.MODEL_ARCH.STARCODER
  1333. def set_gguf_parameters(self):
  1334. block_count = self.hparams["n_layer"]
  1335. self.gguf_writer.add_context_length(self.hparams["n_positions"])
  1336. self.gguf_writer.add_embedding_length(self.hparams["n_embd"])
  1337. self.gguf_writer.add_feed_forward_length(4 * self.hparams["n_embd"])
  1338. self.gguf_writer.add_block_count(block_count)
  1339. self.gguf_writer.add_head_count(self.hparams["n_head"])
  1340. self.gguf_writer.add_head_count_kv(1)
  1341. self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
  1342. self.gguf_writer.add_file_type(self.ftype)
  1343. @ModelBase.register("GPTRefactForCausalLM")
  1344. class RefactModel(TextModel):
  1345. model_arch = gguf.MODEL_ARCH.REFACT
  1346. def set_vocab(self):
  1347. super().set_vocab()
  1348. # TODO: how to determine special FIM tokens automatically?
  1349. special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=False,
  1350. special_token_types = ['prefix', 'suffix', 'middle', 'eot'])
  1351. special_vocab._set_special_token("prefix", 1)
  1352. special_vocab._set_special_token("suffix", 3)
  1353. special_vocab._set_special_token("middle", 2)
  1354. special_vocab.chat_template = None # do not add it twice
  1355. special_vocab.add_to_gguf(self.gguf_writer)
  1356. def set_gguf_parameters(self):
  1357. hidden_dim = self.hparams["n_embd"]
  1358. inner_dim = 4 * hidden_dim
  1359. hidden_dim = int(2 * inner_dim / 3)
  1360. multiple_of = 256
  1361. ff_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of)
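        # LLaMA-style SwiGLU sizing: take 2/3 of the 4*n_embd MLP width, then round
        # up to a multiple of 256. Illustratively, n_embd = 4096 gives
        # inner_dim = 16384, hidden_dim = 10922, ff_dim = 11008.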

        block_count = self.hparams["n_layer"]

        # Refact uses ALiBi, so this context length comes straight from config.json
        # and may only reflect the training setup.
        self.gguf_writer.add_context_length(self.hparams["n_positions"])
        self.gguf_writer.add_embedding_length(self.hparams["n_embd"])
        self.gguf_writer.add_feed_forward_length(ff_dim)
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_head_count(self.hparams["n_head"])
        self.gguf_writer.add_head_count_kv(1)
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        hidden_dim = self.hparams["n_embd"]
        inner_dim = 4 * hidden_dim
        hidden_dim = int(2 * inner_dim / 3)
        multiple_of = 256
        ff_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of)
        n_head = self.hparams["n_head"]
        n_head_kv = 1
        head_dim = self.hparams["n_embd"] // n_head

        tensors: list[tuple[str, Tensor]] = []

        if bid is not None:
            if name == f"transformer.h.{bid}.attn.kv.weight":
                tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid), data_torch[:n_head_kv * head_dim]))
                tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid), data_torch[n_head_kv * head_dim:]))
            elif name == f"transformer.h.{bid}.attn.q.weight":
                tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid), data_torch))
            elif name == f"transformer.h.{bid}.mlp.gate_up_proj.weight":
                tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE, bid), data_torch[:ff_dim]))
                tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP, bid), data_torch[ff_dim:]))

        if len(tensors) == 0:
            tensors.append((self.map_tensor_name(name), data_torch))

        return tensors


@ModelBase.register("StableLmForCausalLM", "StableLMEpochForCausalLM", "LlavaStableLMEpochForCausalLM")
class StableLMModel(TextModel):
    model_arch = gguf.MODEL_ARCH.STABLELM

    def set_vocab(self):
        if (self.dir_model / "tokenizer.json").is_file():
            self._set_vocab_gpt2()
        else:
            # StableLM 2 1.6B used to have a vocab in a similar format to Qwen's vocab
            self._set_vocab_qwen()

    def set_gguf_parameters(self):
        hparams = self.hparams
        block_count = hparams["num_hidden_layers"]

        self.gguf_writer.add_context_length(hparams["max_position_embeddings"])
        self.gguf_writer.add_embedding_length(hparams["hidden_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
        rotary_factor = self.find_hparam(["partial_rotary_factor", "rope_pct"])
        self.gguf_writer.add_rope_dimension_count(int(rotary_factor * (hparams["hidden_size"] // hparams["num_attention_heads"])))
        self.gguf_writer.add_head_count(hparams["num_attention_heads"])
        self.gguf_writer.add_head_count_kv(hparams["num_key_value_heads"])
        self.gguf_writer.add_parallel_residual(hparams["use_parallel_residual"] if "use_parallel_residual" in hparams else True)
        self.gguf_writer.add_layer_norm_eps(self.find_hparam(["layer_norm_eps", "norm_eps"]))
        self.gguf_writer.add_file_type(self.ftype)

    _q_norms: list[dict[str, Tensor]] | None = None
    _k_norms: list[dict[str, Tensor]] | None = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        n_head = self.hparams["num_attention_heads"]
        n_kv_head = self.hparams["num_key_value_heads"]

        if name.find("q_layernorm.norms") != -1:
            assert bid is not None

            if self._q_norms is None:
                self._q_norms = [{} for _ in range(self.block_count)]

            self._q_norms[bid][name] = data_torch

            if len(self._q_norms[bid]) >= n_head:
                return self._stack_qk_norm(bid, n_head, self._q_norms[bid], "q_layernorm")
            else:
                return []

        if name.find("k_layernorm.norms") != -1:
            assert bid is not None

            if self._k_norms is None:
                self._k_norms = [{} for _ in range(self.block_count)]

            self._k_norms[bid][name] = data_torch

            if len(self._k_norms[bid]) >= n_kv_head:
                return self._stack_qk_norm(bid, n_kv_head, self._k_norms[bid], "k_layernorm")
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]

    def _stack_qk_norm(self, bid: int, n_head: int, norms: dict[str, Tensor], layer_name: str = "q_layernorm"):
        datas: list[Tensor] = []
        # extract the norms in order
        for xid in range(n_head):
            ename = f"model.layers.{bid}.self_attn.{layer_name}.norms.{xid}.weight"
            datas.append(norms[ename])
            del norms[ename]
        data_torch = torch.stack(datas, dim=0)

        merged_name = f"model.layers.{bid}.self_attn.{layer_name}.weight"
        new_name = self.map_tensor_name(merged_name)

        return [(new_name, data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._q_norms is not None or self._k_norms is not None:
            # flatten two `list[dict[str, Tensor]]` into a single `list[str]`
            norms = (
                [k for d in self._q_norms for k in d.keys()] if self._q_norms is not None else []
            ) + (
                [k for d in self._k_norms for k in d.keys()] if self._k_norms is not None else []
            )
            if len(norms) > 0:
                raise ValueError(f"Unprocessed norms: {norms}")


@ModelBase.register(
    "LLaMAForCausalLM",
    "LlamaForCausalLM",
    "MistralForCausalLM",
    "MixtralForCausalLM",
    "VLlama3ForCausalLM",
    "LlavaForConditionalGeneration")
class LlamaModel(TextModel):
    model_arch = gguf.MODEL_ARCH.LLAMA
    undo_permute = True

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # fix for SmolVLM2, missing `num_attention_heads` in config.json
        if self.hf_arch == "VLlama3ForCausalLM":
            self.hparams["num_attention_heads"] = self.hparams.get("num_attention_heads", 32)

    def set_vocab(self):
        try:
            self._set_vocab_sentencepiece()
        except FileNotFoundError:
            try:
                self._set_vocab_llama_hf()
            except (FileNotFoundError, TypeError):
                # Llama 3
                self._set_vocab_gpt2()

        # Apply to CodeLlama only (and ignore for Llama 3 with a vocab size of 128256)
        if self.hparams.get("vocab_size", 32000) == 32016:
            special_vocab = gguf.SpecialVocab(
                self.dir_model, load_merges=False,
                special_token_types = ['prefix', 'suffix', 'middle', 'eot']
            )
            special_vocab._set_special_token("prefix", 32007)
            special_vocab._set_special_token("suffix", 32008)
            special_vocab._set_special_token("middle", 32009)
            special_vocab._set_special_token("eot", 32010)
            special_vocab.add_to_gguf(self.gguf_writer)

        tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
        if tokenizer_config_file.is_file():
            with open(tokenizer_config_file, "r", encoding="utf-8") as f:
                tokenizer_config_json = json.load(f)
                if "add_prefix_space" in tokenizer_config_json:
                    self.gguf_writer.add_add_space_prefix(tokenizer_config_json["add_prefix_space"])

        # Apply to granite small models only
        if self.hparams.get("vocab_size", 32000) == 49152:
            self.gguf_writer.add_add_bos_token(False)

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        hparams = self.hparams
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])

        if "head_dim" in hparams:
            rope_dim = hparams["head_dim"]
        else:
            rope_dim = hparams["hidden_size"] // hparams["num_attention_heads"]
        self.gguf_writer.add_rope_dimension_count(rope_dim)

        if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]:
            if self.hparams["rope_scaling"].get("type") == "linear":
                self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
                self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"])

    @staticmethod
    def permute(weights: Tensor, n_head: int, n_head_kv: int | None):
        if n_head_kv is not None and n_head != n_head_kv:
            n_head = n_head_kv
        return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
                .swapaxes(1, 2)
                .reshape(weights.shape))

    _experts: list[dict[str, Tensor]] | None = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        n_head = self.hparams["num_attention_heads"]
        n_kv_head = self.hparams.get("num_key_value_heads")
        is_vision_tensor = "vision_tower" in name \
            or "vision_model" in name \
            or "model.connector" in name \
            or "multi_modal_projector" in name

        if is_vision_tensor:
            return []  # skip vision tensors
        elif name.startswith("model.text_model"):
            name = name.replace("text_model.", "")  # for SmolVLM
        elif name.startswith("language_model."):
            name = name.replace("language_model.", "")  # for the rest

        if self.undo_permute:
            if name.endswith(("q_proj.weight", "q_proj.bias")):
                data_torch = LlamaModel.permute(data_torch, n_head, n_head)
            if name.endswith(("k_proj.weight", "k_proj.bias")):
                data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)

        # process the experts separately
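        # (expert tensors arrive one at a time while iterating over the checkpoint;
        # they are buffered per layer and emitted as one stacked 3d tensor per
        # w1/w2/w3 once all n_experts * 3 weights of a block have been seen)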
        if name.find("block_sparse_moe.experts") != -1:
            n_experts = self.hparams["num_local_experts"]
            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for wid in ["w1", "w2", "w3"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.{wid}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)

                    merged_name = f"layers.{bid}.feed_forward.experts.{wid}.weight"

                    new_name = self.map_tensor_name(merged_name)

                    tensors.append((new_name, data_torch))
                return tensors
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]

    def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
        if rope_scaling := self.find_hparam(["rope_scaling"], optional=True):
            if rope_scaling.get("rope_type", '').lower() == "llama3":
                base = self.hparams.get("rope_theta", 10000.0)
                dim = self.hparams.get("head_dim", self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
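                # Llama 3.1-style rope scaling: precompute one factor per rotary
                # frequency. Low-frequency (long-wavelength) dims get the full
                # `factor`, high-frequency dims are left at 1, and the band in
                # between is smoothly interpolated (matches the "llama3" rope_type
                # handling in HF transformers).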
  1575. freqs = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim))
  1576. factor = rope_scaling.get("factor", 8.0)
  1577. low_freq_factor = rope_scaling.get("low_freq_factor", 1.0)
  1578. high_freq_factor = rope_scaling.get("high_freq_factor", 4.0)
  1579. old_context_len = self.hparams.get("original_max_position_embeddings", 8192)
  1580. low_freq_wavelen = old_context_len / low_freq_factor
  1581. high_freq_wavelen = old_context_len / high_freq_factor
  1582. # assert low_freq_wavelen != high_freq_wavelen # Errors for Llama4
  1583. rope_factors = []
  1584. for freq in freqs:
  1585. wavelen = 2 * math.pi / freq
  1586. if wavelen < high_freq_wavelen:
  1587. rope_factors.append(1)
  1588. elif wavelen > low_freq_wavelen:
  1589. rope_factors.append(factor)
  1590. else:
  1591. smooth = (old_context_len / wavelen - low_freq_factor) / (high_freq_factor - low_freq_factor)
  1592. rope_factors.append(1 / ((1 - smooth) / factor + smooth))
  1593. yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FREQS), torch.tensor(rope_factors, dtype=torch.float32))
  1594. def prepare_tensors(self):
  1595. super().prepare_tensors()
  1596. if self._experts is not None:
  1597. # flatten `list[dict[str, Tensor]]` into `list[str]`
  1598. experts = [k for d in self._experts for k in d.keys()]
  1599. if len(experts) > 0:
  1600. raise ValueError(f"Unprocessed experts: {experts}")
  1601. @ModelBase.register(
  1602. "LlavaForConditionalGeneration", # pixtral
  1603. "Mistral3ForConditionalGeneration", # mistral small 3.1
  1604. )
  1605. class LlavaVisionModel(VisionModel):
  1606. img_break_tok_id = -1
  1607. def __init__(self, *args, **kwargs):
  1608. super().__init__(*args, **kwargs)
  1609. if self.hparams["model_type"] == "pixtral":
  1610. # layer_norm_eps is not in config.json, it is hard-coded in modeling_pixtral.py
  1611. self.hparams["layer_norm_eps"] = self.hparams.get("layer_norm_eps", 1e-5)
  1612. self.img_break_tok_id = self.get_token_id("[IMG_BREAK]")
  1613. logger.info(f"Image break token id: {self.img_break_tok_id}")
  1614. else:
  1615. raise ValueError(f"Unsupported model type: {self.hparams['model_type']}")
  1616. def get_token_id(self, token: str) -> int:
  1617. tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
  1618. with open(tokenizer_config_file, "r", encoding="utf-8") as f:
  1619. added_tokens_decoder = json.load(f)['added_tokens_decoder']
  1620. for id_, token_data in added_tokens_decoder.items():
  1621. if token_data["content"] == token:
  1622. return int(id_)
  1623. raise ValueError(f"Token '{token}' not found in tokenizer config.")
  1624. def set_gguf_parameters(self):
  1625. super().set_gguf_parameters()
  1626. hparams = self.hparams
  1627. if hparams["model_type"] == "pixtral":
  1628. self.gguf_writer.add_vision_projector_type(gguf.VisionProjectorType.PIXTRAL)
  1629. self.gguf_writer.add_vision_attention_layernorm_eps(hparams["layer_norm_eps"])
  1630. # hidden_act
  1631. if hparams["hidden_act"] == "silu":
  1632. self.gguf_writer.add_vision_use_silu(True)
  1633. elif hparams["hidden_act"] == "gelu":
  1634. self.gguf_writer.add_vision_use_gelu(True)
  1635. else:
  1636. raise ValueError(f"Unsupported hidden_act: {hparams['hidden_act']}")
  1637. # spatial_merge_size
  1638. if "spatial_merge_size" in self.global_config:
  1639. self.gguf_writer.add_vision_spatial_merge_size(self.global_config["spatial_merge_size"])
  1640. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  1641. del bid # unused
  1642. n_head = self.hparams["num_attention_heads"]
  1643. n_kv_head = n_head
  1644. if name.startswith("multi_modal_projector.") or name.startswith("vision_tower."):
  1645. # process vision tensors
  1646. if name.endswith(("q_proj.weight", "q_proj.bias")):
  1647. data_torch = LlamaModel.permute(data_torch, n_head, n_head)
  1648. if name.endswith(("k_proj.weight", "k_proj.bias")):
  1649. data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)
  1650. return [(self.map_tensor_name(name), data_torch)]
  1651. if self.img_break_tok_id > 0 and "embed_tokens.weight" in name:
  1652. logger.info(f"Extracting [IMG_BREAK] token embedding from {name}")
  1653. # for pixtral model, we need to extract the [IMG_BREAK] token embedding
  1654. img_break_embd = data_torch[self.img_break_tok_id]
  1655. name = gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.V_TOK_EMBD_IMG_BREAK]
  1656. return [(self.map_tensor_name(name), img_break_embd)]
  1657. return [] # skip other tensors
@ModelBase.register("Idefics3ForConditionalGeneration", "SmolVLMForConditionalGeneration")
class SmolVLMModel(VisionModel):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.hparams["model_type"] == "smolvlm_vision":
            # fix for SmolVLM2, missing some keys in config.json
            # default values are taken from transformers code
            self.hparams["hidden_size"] = self.hparams.get("hidden_size", 1152)
            self.hparams["num_attention_heads"] = self.hparams.get("num_attention_heads", 16)
            self.hparams["intermediate_size"] = self.hparams.get("intermediate_size", 3072)

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_vision_projector_type(gguf.VisionProjectorType.IDEFICS3)
        self.gguf_writer.add_vision_attention_layernorm_eps(self.hparams.get("layer_norm_eps", 1e-5))
        self.gguf_writer.add_vision_projector_scale_factor(self.global_config.get("scale_factor", 2))
        self.gguf_writer.add_vision_use_gelu(True)

    def tensor_force_quant(self, name, new_name, bid, n_dims):
        del bid, new_name, n_dims  # unused
        if ".embeddings." in name:
            return gguf.GGMLQuantizationType.F32
        return False

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused
        is_vision_tensor = "vision_tower" in name or "vision_model" in name or "model.connector" in name

        if is_vision_tensor:
            return [(self.map_tensor_name(name), data_torch)]

        return []  # skip other tensors
@ModelBase.register("Llama4ForConditionalGeneration")
class Llama4Model(LlamaModel):
    model_arch = gguf.MODEL_ARCH.LLAMA4
    undo_permute = False

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # IMPORTANT: the normal "intermediate_size" is renamed to "intermediate_size_mlp", we need to undo this
        self.hparams["intermediate_size_moe"] = self.hparams["intermediate_size"]
        self.hparams["intermediate_size"] = self.hparams["intermediate_size_mlp"]

    def set_vocab(self):
        self._set_vocab_gpt2()
        self.gguf_writer.add_add_bos_token(True)

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_interleave_moe_layer_step(self.hparams["interleave_moe_layer_step"])
        self.gguf_writer.add_expert_feed_forward_length(self.hparams["intermediate_size_moe"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None):
        # split the gate_up into gate and up
        if "gate_up_proj" in name:
            name_up = name.replace("gate_up_proj", "up_proj.weight")
            name_gate = name.replace("gate_up_proj", "gate_proj.weight")
            dim_half = data_torch.shape[-1] // 2
            gate_proj_weight, up_proj_weight = data_torch.transpose(-1, -2).split(dim_half, dim=-2)
            return [
                (self.map_tensor_name(name_gate), gate_proj_weight),
                (self.map_tensor_name(name_up), up_proj_weight)
            ]

        if name.endswith("down_proj"):
            name += ".weight"
            data_torch = data_torch.transpose(-1, -2)

        if "multi_modal_projector" in name or "vision_model" in name:
            return []
        return super().modify_tensors(data_torch, name, bid)
@ModelBase.register("Mistral3ForConditionalGeneration")
class Mistral3Model(LlamaModel):
    model_arch = gguf.MODEL_ARCH.LLAMA

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None):
        name = name.replace("language_model.", "")
        if "multi_modal_projector" in name or "vision_tower" in name:
            return []
        return super().modify_tensors(data_torch, name, bid)
@ModelBase.register("DeciLMForCausalLM")
class DeciModel(TextModel):
    model_arch = gguf.MODEL_ARCH.DECI

    @staticmethod
    def _ffn_mult_to_intermediate_size(ffn_mult: float, n_embd: int) -> int:
        # DeciLM-specific code
        intermediate_size = int(2 * ffn_mult * n_embd / 3)
        return DeciModel._find_multiple(intermediate_size, 256)

    @staticmethod
    def _find_multiple(n: int, k: int) -> int:
        # DeciLM-specific code
        if n % k == 0:
            return n
        return n + k - (n % k)
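    # Worked example (illustrative only): with ffn_mult = 1.3 and n_embd = 8192,
    # int(2 * 1.3 * 8192 / 3) = 7099, which _find_multiple rounds up to 7168,
    # the next multiple of 256.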
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if "block_configs" in self.hparams:  # Llama-3_1-Nemotron-51B
            _block_configs: list[dict[str,Any]] = self.hparams["block_configs"]
            assert self.block_count == len(_block_configs)
            self._num_kv_heads = list()
            self._num_heads = list()
            _ffn_multipliers = list()
            # ***linear attention layer***
            # if n_heads_in_group is None and replace_with_linear is True
            # then _num_kv_heads[il] is 0 and _num_heads[il] is num_attention_heads
            # ***attention-free layer***
            # if n_heads_in_group is None and replace_with_linear is False
            # then _num_kv_heads[il] is 0 and _num_heads[il] is 0
            # ***normal attention layer***
            # if n_heads_in_group is not None, then
            # _num_kv_heads[il] is num_attention_heads // n_heads_in_group and
            # _num_heads[il] is num_attention_heads
            # ***dummy layer*** for Nemotron 253B
            # if n_heads_in_group is None and ffn_mult is None
            # then _num_kv_heads[il] is 0, _num_heads[il] is 0 and _ffn_dims[il] is 0
            for il in range(len(_block_configs)):
                if _block_configs[il]["attention"]["n_heads_in_group"] is None:
                    if _block_configs[il]["attention"]["replace_with_linear"] is True:
                        self._num_kv_heads.append(0)
                        self._num_heads.append(self.hparams["num_attention_heads"])
                    else:
                        self._num_kv_heads.append(0)
                        self._num_heads.append(0)
                else:
                    self._num_kv_heads.append(self.hparams["num_attention_heads"] // _block_configs[il]["attention"]["n_heads_in_group"])
                    self._num_heads.append(self.hparams["num_attention_heads"])
                if _block_configs[il]["ffn"]["ffn_mult"] is None:  # dummy layer
                    _ffn_multipliers.append(0.0)
                else:
                    _ffn_multipliers.append(_block_configs[il]["ffn"]["ffn_mult"])
            assert self.block_count == len(self._num_kv_heads)
            assert self.block_count == len(self._num_heads)
            assert self.block_count == len(_ffn_multipliers)
            assert isinstance(self._num_kv_heads, list) and isinstance(self._num_kv_heads[0], int)
            assert isinstance(self._num_heads, list) and isinstance(self._num_heads[0], int)
            assert isinstance(_ffn_multipliers, list) and isinstance(_ffn_multipliers[0], float)
            self._ffn_dims: list[int] = [
                DeciModel._ffn_mult_to_intermediate_size(multiplier, self.hparams["hidden_size"])
                for multiplier in _ffn_multipliers
            ]
    def set_vocab(self):
        # Please change tokenizer_config.json of Llama-3_1-Nemotron-51B's
        # eos_token from '|eot_id|' to '|end_of_text|'
        if self.hparams.get("vocab_size", 128256) == 128256:
            tokens, toktypes, tokpre = self.get_vocab_base()
            self.gguf_writer.add_tokenizer_model("gpt2")
            self.gguf_writer.add_tokenizer_pre(tokpre)
            self.gguf_writer.add_token_list(tokens)
            self.gguf_writer.add_token_types(toktypes)

            special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=True)
            special_vocab.add_to_gguf(self.gguf_writer)
        else:
            # DeciLM-7B
            self._set_vocab_llama_hf()
    def set_gguf_parameters(self):
        if "block_configs" in self.hparams:  # Llama-3_1-Nemotron-51B
            assert self.block_count == len(self._num_kv_heads)
            assert self.block_count == len(self._num_heads)
            assert self.block_count == len(self._ffn_dims)
            if (rope_theta := self.hparams.get("rope_theta")) is not None:
                self.gguf_writer.add_rope_freq_base(rope_theta)
            self.gguf_writer.add_head_count_kv(self._num_kv_heads)
            self.gguf_writer.add_head_count(self._num_heads)
            self.gguf_writer.add_feed_forward_length(self._ffn_dims)
            self.gguf_writer.add_block_count(self.block_count)
            self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"])
            self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
            self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
            self.gguf_writer.add_key_length(self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
            self.gguf_writer.add_value_length(self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
            self.gguf_writer.add_file_type(self.ftype)
        else:  # DeciLM-7B
            super().set_gguf_parameters()
            if "num_key_value_heads_per_layer" in self.hparams:  # DeciLM-7B
                self._num_kv_heads: list[int] = self.hparams["num_key_value_heads_per_layer"]
                assert self.block_count == len(self._num_kv_heads)
                self.gguf_writer.add_head_count_kv(self._num_kv_heads)
        hparams = self.hparams
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])

        if "head_dim" in hparams:
            rope_dim = hparams["head_dim"]
        else:
            rope_dim = hparams["hidden_size"] // hparams["num_attention_heads"]
        self.gguf_writer.add_rope_dimension_count(rope_dim)

        if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]:
            if self.hparams["rope_scaling"].get("type") == "linear":
                self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
                self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"])
    @staticmethod
    def permute(weights: Tensor, n_head: int, n_head_kv: int | None):
        if n_head_kv is not None and n_head != n_head_kv:
            n_head = n_head_kv
        return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
                .swapaxes(1, 2)
                .reshape(weights.shape))
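    # Informally: permute regroups the rows of each attention head so that the
    # two rotary sub-blocks HF stores contiguously come back out in the
    # interleaved order llama.cpp expects; reshape -> swapaxes(1, 2) -> reshape
    # only reorders rows and changes no values.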
    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        n_head = self.hparams["num_attention_heads"]
        if bid is not None:
            if "num_key_value_heads_per_layer" in self.hparams:
                n_kv_head = self.hparams["num_key_value_heads_per_layer"][bid]
            elif "block_configs" in self.hparams:
                n_kv_head = self._num_kv_heads[bid]
                n_head = self._num_heads[bid]
            else:
                n_kv_head = self.hparams.get("num_key_value_heads")
        else:
            n_kv_head = self.hparams.get("num_key_value_heads")

        if name.endswith(("q_proj.weight", "q_proj.bias")):
            data_torch = DeciModel.permute(data_torch, n_head, n_head)
        if name.endswith(("k_proj.weight", "k_proj.bias")):
            data_torch = DeciModel.permute(data_torch, n_head, n_kv_head)
        return [(self.map_tensor_name(name), data_torch)]
    def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
        if rope_scaling := self.find_hparam(["rope_scaling"], optional=True):
            if rope_scaling.get("rope_type", '').lower() == "llama3":
                base = self.hparams.get("rope_theta", 10000.0)
                dim = self.hparams.get("head_dim", self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
                freqs = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim))

                factor = rope_scaling.get("factor", 8.0)
                low_freq_factor = rope_scaling.get("low_freq_factor", 1.0)
                high_freq_factor = rope_scaling.get("high_freq_factor", 4.0)
                old_context_len = self.hparams.get("original_max_position_embeddings", 8192)

                low_freq_wavelen = old_context_len / low_freq_factor
                high_freq_wavelen = old_context_len / high_freq_factor
                assert low_freq_wavelen != high_freq_wavelen
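                # The loop below applies the llama3 piecewise rule: components whose
                # wavelength is shorter than high_freq_wavelen keep a factor of 1,
                # those longer than low_freq_wavelen get the full factor, and the
                # band in between is smoothly interpolated.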
                rope_factors = []
                for freq in freqs:
                    wavelen = 2 * math.pi / freq
                    if wavelen < high_freq_wavelen:
                        rope_factors.append(1)
                    elif wavelen > low_freq_wavelen:
                        rope_factors.append(factor)
                    else:
                        smooth = (old_context_len / wavelen - low_freq_factor) / (high_freq_factor - low_freq_factor)
                        rope_factors.append(1 / ((1 - smooth) / factor + smooth))

                yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FREQS), torch.tensor(rope_factors, dtype=torch.float32))
    def prepare_tensors(self):
        super().prepare_tensors()
@ModelBase.register("BitnetForCausalLM")
class BitnetModel(TextModel):
    model_arch = gguf.MODEL_ARCH.BITNET

    def set_vocab(self):
        self._set_vocab_sentencepiece()

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
        self.gguf_writer.add_rope_scaling_factor(1.0)

    def weight_quant(self, weight: Tensor) -> Tensor:
        dtype = weight.dtype
        weight = weight.float()
        scale = weight.abs().mean().clamp(min=1e-5)
        iscale = 1 / scale
        # TODO: multiply by the scale directly instead of inverting it twice
        # (this is also unnecessarily doubly inverted upstream)
        # ref: https://huggingface.co/1bitLLM/bitnet_b1_58-3B/blob/af89e318d78a70802061246bf037199d2fb97020/utils_quant.py#L10
        result = (weight * iscale).round().clamp(-1, 1) / iscale
        return result.type(dtype)
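    # Worked example (illustrative only): weight = [0.3, -0.8, 0.05] gives
    # scale = mean(|w|) ~= 0.3833; weight / scale rounds and clamps to
    # [1, -1, 0], and dividing by iscale (i.e. multiplying by scale) stores
    # the ternary values [0.3833, -0.3833, 0.0] in the original dtype.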
    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        new_name = self.map_tensor_name(name)

        if any(self.match_model_tensor_name(new_name, key, bid) for key in [
            gguf.MODEL_TENSOR.ATTN_Q,
            gguf.MODEL_TENSOR.ATTN_K,
            gguf.MODEL_TENSOR.ATTN_V,
            gguf.MODEL_TENSOR.ATTN_OUT,
            gguf.MODEL_TENSOR.FFN_UP,
            gguf.MODEL_TENSOR.FFN_DOWN,
            gguf.MODEL_TENSOR.FFN_GATE,
        ]):
            # transform weight into 1/0/-1 (in fp32)
            data_torch = self.weight_quant(data_torch)

        yield (new_name, data_torch)
@ModelBase.register("GrokForCausalLM")
class GrokModel(TextModel):
    model_arch = gguf.MODEL_ARCH.GROK

    def set_vocab(self):
        self._set_vocab_sentencepiece()

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def set_gguf_parameters(self):
        super().set_gguf_parameters()

    _experts: list[dict[str, Tensor]] | None = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # process the experts separately
        if name.find(".moe.") != -1:
            n_experts = self.hparams["num_local_experts"]

            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for wid in ["linear", "linear_1", "linear_v"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"transformer.decoder_layer.{bid}.moe.{xid}.{wid}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)

                    merged_name = f"transformer.decoder_layer.{bid}.moe.{wid}.weight"

                    new_name = self.map_tensor_name(merged_name)

                    tensors.append((new_name, data_torch))
                return tensors
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]
@ModelBase.register("DbrxForCausalLM")
class DbrxModel(TextModel):
    model_arch = gguf.MODEL_ARCH.DBRX

    def set_gguf_parameters(self):
        ffn_config = self.hparams["ffn_config"]
        attn_config = self.hparams["attn_config"]
        self.gguf_writer.add_block_count(self.hparams["n_layers"])

        self.gguf_writer.add_context_length(self.hparams["max_seq_len"])
        self.gguf_writer.add_embedding_length(self.hparams["d_model"])
        self.gguf_writer.add_feed_forward_length(ffn_config["ffn_hidden_size"])

        self.gguf_writer.add_head_count(self.hparams["n_heads"])
        self.gguf_writer.add_head_count_kv(attn_config["kv_n_heads"])

        self.gguf_writer.add_rope_freq_base(attn_config["rope_theta"])

        self.gguf_writer.add_clamp_kqv(attn_config["clip_qkv"])

        self.gguf_writer.add_expert_count(ffn_config["moe_num_experts"])
        self.gguf_writer.add_expert_used_count(ffn_config["moe_top_k"])

        self.gguf_writer.add_layer_norm_eps(1e-5)

        self.gguf_writer.add_file_type(self.ftype)
        logger.info(f"gguf: file type = {self.ftype}")

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        n_expert = self.hparams["ffn_config"]["moe_num_experts"]
        n_ff = self.hparams["ffn_config"]["ffn_hidden_size"]
        n_embd = self.hparams["d_model"]

        # Specific behavior for experts tensors: suffix .weight, view as 3D and transpose
        # original implementation expects (n_expert, n_ff, n_embd) for all experts weights
        # But llama.cpp moe graph works differently
        # AND the dimensions in ggml are typically in the reverse order of the pytorch dimensions
        # so (n_expert, n_ff, n_embd) in pytorch is {n_embd, n_ff, n_expert} in ggml_tensor
        exp_tensor_names = {"ffn.experts.mlp.w1": None,       # LLM_TENSOR_FFN_GATE_EXPS ggml_tensor->ne{n_embd, n_ff,   n_expert}
                            "ffn.experts.mlp.w2": (0, 2, 1),  # LLM_TENSOR_FFN_DOWN_EXPS ggml_tensor->ne{n_ff,   n_embd, n_expert}
                            "ffn.experts.mlp.v1": None}       # LLM_TENSOR_FFN_UP_EXPS   ggml_tensor->ne{n_embd, n_ff,   n_expert}
        experts = False

        for exp_tensor_name in exp_tensor_names.keys():
            if name.find(exp_tensor_name) != -1 and name.find(".weight") == -1:
                experts = True
                data_torch = data_torch.view(n_expert, n_ff, n_embd)
                if (permute_tensor := exp_tensor_names[exp_tensor_name]) is not None:
                    data_torch = data_torch.permute(*permute_tensor)
                break

        # map tensor names
        # In MoE models the ffn tensors are typically most of the model weights,
        # and need to be quantizable. Quantize expects tensor names to be suffixed by .weight.
        # Every other model has its weight names ending in .weight; dbrx does not follow
        # that convention:
        # https://huggingface.co/databricks/dbrx-instruct/blob/main/model.safetensors.index.json#L15
        new_name = self.map_tensor_name(name if not experts else name + ".weight", try_suffixes=(".weight",))

        return [(new_name, data_torch)]

    def tensor_force_quant(self, name: str, new_name: str, bid: int | None, n_dims: int) -> gguf.GGMLQuantizationType | bool:
        del name, new_name, bid  # unused

        return n_dims > 1
@ModelBase.register("MiniCPMForCausalLM")
class MiniCPMModel(TextModel):
    model_arch = gguf.MODEL_ARCH.MINICPM

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        embedding_scale = float(self.hparams["scale_emb"])
        self.gguf_writer.add_embedding_scale(embedding_scale)
        logger.info(f"gguf: (minicpm) embedding_scale = {embedding_scale}")
        residual_scale = self.hparams["scale_depth"] / self.hparams["num_hidden_layers"] ** 0.5
        self.gguf_writer.add_residual_scale(residual_scale)
        logger.info(f"gguf: (minicpm) residual_scale = {residual_scale}")
        logit_scale = self.hparams["hidden_size"] / self.hparams["dim_model_base"]
        self.gguf_writer.add_logit_scale(logit_scale)
        logger.info(f"gguf: (minicpm) logit_scale = {logit_scale}")
        if self.hparams.get("rope_scaling") is not None:
            if self.hparams["rope_scaling"].get("type") == "longrope":
                self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LONGROPE)
                logger.info(f"gguf: (minicpm) rope_scaling_type = {gguf.RopeScalingType.LONGROPE}")
    def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
        rope_dims = self.hparams["hidden_size"] // self.hparams["num_attention_heads"]

        rope_scaling = self.find_hparam(['rope_scaling'], True)
        if rope_scaling is not None:
            long_factors = rope_scaling.get('long_factor', None)
            short_factors = rope_scaling.get('short_factor', None)

            if long_factors is None or short_factors is None:
                raise KeyError('Missing the required key rope_scaling.long_factor or rope_scaling.short_factor')

            if len(long_factors) != len(short_factors) or len(long_factors) != rope_dims / 2:
                raise ValueError(f'The length of rope long and short factors must be {rope_dims / 2}')

            yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FACTORS_LONG), torch.tensor(long_factors, dtype=torch.float32))
            yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FACTORS_SHORT), torch.tensor(short_factors, dtype=torch.float32))
    def set_vocab(self):
        self._set_vocab_sentencepiece()

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        n_head = self.hparams["num_attention_heads"]
        n_kv_head = self.hparams.get("num_key_value_heads")

        # HF models permute some of the tensors, so we need to undo that
        if name.endswith("q_proj.weight"):
            data_torch = LlamaModel.permute(data_torch, n_head, n_head)
        if name.endswith("k_proj.weight"):
            data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)

        return [(self.map_tensor_name(name), data_torch)]
@ModelBase.register("MiniCPM3ForCausalLM")
class MiniCPM3Model(TextModel):
    model_arch = gguf.MODEL_ARCH.MINICPM3

    def set_gguf_parameters(self):
        hparams = self.hparams

        self.gguf_writer.add_file_type(self.ftype)
        self.gguf_writer.add_context_length(hparams["max_position_embeddings"])
        self.gguf_writer.add_embedding_length(hparams["hidden_size"])
        self.gguf_writer.add_block_count(self.block_count)
        self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
        self.gguf_writer.add_head_count(hparams["num_attention_heads"])
        self.gguf_writer.add_head_count_kv(hparams["num_key_value_heads"])
        self.gguf_writer.add_layer_norm_rms_eps(hparams["rms_norm_eps"])
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])
        if "q_lora_rank" in hparams and hparams["q_lora_rank"] is not None:
            self.gguf_writer.add_q_lora_rank(hparams["q_lora_rank"])
        self.gguf_writer.add_kv_lora_rank(hparams["kv_lora_rank"])
        self.gguf_writer.add_key_length(hparams["qk_nope_head_dim"] + hparams["qk_rope_head_dim"])
        self.gguf_writer.add_rope_dimension_count(hparams["qk_rope_head_dim"])
    def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
        rope_scaling = self.find_hparam(['rope_scaling'], True)
        if rope_scaling is not None:
            rope_dims = self.hparams["qk_rope_head_dim"]

            long_factors = rope_scaling.get('long_factor', None)
            short_factors = rope_scaling.get('short_factor', None)

            if long_factors is None or short_factors is None:
                raise KeyError('Missing the required key rope_scaling.long_factor or rope_scaling.short_factor')

            if len(long_factors) != len(short_factors) or len(long_factors) != rope_dims / 2:
                raise ValueError(f'The length of rope long and short factors must be {rope_dims / 2}')

            yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FACTORS_LONG), torch.tensor(long_factors, dtype=torch.float32))
            yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FACTORS_SHORT), torch.tensor(short_factors, dtype=torch.float32))
    def set_vocab(self):
        self._set_vocab_sentencepiece()

    def _reverse_hf_permute(self, weights: Tensor, n_head: int, n_kv_head: int | None = None) -> Tensor:
        if n_kv_head is not None and n_head != n_kv_head:
            n_head //= n_kv_head

        return (
            weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
            .swapaxes(1, 2)
            .reshape(weights.shape)
        )
@ModelBase.register("QWenLMHeadModel")
class QwenModel(TextModel):
    model_arch = gguf.MODEL_ARCH.QWEN

    @staticmethod
    def token_bytes_to_string(b):
        from transformers.models.gpt2.tokenization_gpt2 import bytes_to_unicode
        byte_encoder = bytes_to_unicode()
        return ''.join([byte_encoder[ord(char)] for char in b.decode('latin-1')])

    @staticmethod
    def bpe(mergeable_ranks: dict[bytes, int], token: bytes, max_rank: int | None = None) -> list[bytes]:
        parts = [bytes([b]) for b in token]
        while True:
            min_idx = None
            min_rank = None
            for i, pair in enumerate(zip(parts[:-1], parts[1:])):
                rank = mergeable_ranks.get(pair[0] + pair[1])
                if rank is not None and (min_rank is None or rank < min_rank):
                    min_idx = i
                    min_rank = rank
            if min_rank is None or (max_rank is not None and min_rank >= max_rank):
                break
            assert min_idx is not None
            parts = parts[:min_idx] + [parts[min_idx] + parts[min_idx + 1]] + parts[min_idx + 2:]
        return parts
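    # Usage sketch (illustrative only): QwenModel.bpe(mergeable_ranks, b"hello")
    # starts from single bytes and repeatedly merges the adjacent pair with the
    # lowest rank in mergeable_ranks, returning the final byte chunks, e.g.
    # [b"hel", b"lo"] for a vocabulary containing those merges; passing max_rank
    # replays only the merges whose rank is below that threshold.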
    def set_vocab(self):
        self._set_vocab_qwen()

    def set_gguf_parameters(self):
        self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"])
        self.gguf_writer.add_block_count(self.hparams["num_hidden_layers"])
        self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
        self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
        self.gguf_writer.add_rope_freq_base(self.hparams["rotary_emb_base"])
        self.gguf_writer.add_rope_dimension_count(self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
        self.gguf_writer.add_head_count(self.hparams["num_attention_heads"])
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)
@ModelBase.register("Qwen2Model", "Qwen2ForCausalLM")
class Qwen2Model(TextModel):
    model_arch = gguf.MODEL_ARCH.QWEN2

    def set_vocab(self):
        try:
            self._set_vocab_sentencepiece()
        except FileNotFoundError:
            self._set_vocab_gpt2()

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self._try_set_pooling_type()
        if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]:
            if self.hparams["rope_scaling"].get("type") == "yarn":
                self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
                self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"])
                self.gguf_writer.add_rope_scaling_orig_ctx_len(self.hparams["rope_scaling"]["original_max_position_embeddings"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        if self.hf_arch == "Qwen2Model":
            name = f"model.{name}"  # map to Qwen2ForCausalLM tensors
        yield from super().modify_tensors(data_torch, name, bid)
@ModelBase.register("Qwen2VLForConditionalGeneration", "Qwen2_5_VLForConditionalGeneration")
class Qwen2VLModel(TextModel):
    model_arch = gguf.MODEL_ARCH.QWEN2VL

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        mrope_section = self.hparams["rope_scaling"]["mrope_section"]
        mrope_section += [0] * max(0, 4 - len(mrope_section))
        self.gguf_writer.add_rope_dimension_sections(mrope_section)
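        # e.g. mrope_section = [16, 24, 24] is right-padded above to
        # [16, 24, 24, 0], so the GGUF field always carries four section lengths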
    def set_vocab(self):
        try:
            self._set_vocab_sentencepiece()
        except FileNotFoundError:
            self._set_vocab_gpt2()

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused
        if name.startswith("visual."):
            # skip visual tensors
            return []
        return [(self.map_tensor_name(name), data_torch)]
@ModelBase.register("Qwen2VLForConditionalGeneration", "Qwen2_5_VLForConditionalGeneration")
class Qwen2VLVisionModel(VisionModel):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.hparams["image_size"] = self.hparams.get("image_size", 560)
        # rename config.json values
        self.hparams["num_attention_heads"] = self.hparams.get("num_heads")
        self.hparams["num_hidden_layers"] = self.hparams.get("depth")
        if "embed_dim" in self.hparams:  # qwen2vl
            self.hparams["intermediate_size"] = self.hparams.get("hidden_size")
            self.hparams["hidden_size"] = self.hparams.get("embed_dim")

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        hparams = self.hparams
        if self.global_config['model_type'] == 'qwen2_vl':
            self.gguf_writer.add_vision_projector_type(gguf.VisionProjectorType.QWEN2VL)
        elif self.global_config['model_type'] == 'qwen2_5_vl':
            self.gguf_writer.add_vision_projector_type(gguf.VisionProjectorType.QWEN25VL)
            self.gguf_writer.add_vision_use_silu(True)
            # find n_wa_pattern (window attention pattern)
            fullatt_block_indexes = hparams.get("fullatt_block_indexes")
            assert fullatt_block_indexes is not None, "fullatt_block_indexes is required for qwen2_5_vl"
            n_wa_pattern = fullatt_block_indexes[0] + 1
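            # e.g. fullatt_block_indexes = [7, 15, 23, 31] gives n_wa_pattern = 8:
            # every 8th block uses full attention, the rest use window attention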
            # validate n_wa_pattern
            for i in range(1, len(fullatt_block_indexes)):
                if fullatt_block_indexes[i] - fullatt_block_indexes[i - 1] != n_wa_pattern:
                    raise ValueError(f"Invalid fullatt_block_indexes: {fullatt_block_indexes}")
            self.gguf_writer.add_vision_n_wa_pattern(n_wa_pattern)
        else:
            raise ValueError(f"Unknown QwenVL model type: {self.global_config['model_type']}")
        # default values below are taken from HF transformers code
        self.gguf_writer.add_vision_attention_layernorm_eps(self.global_config.get("rms_norm_eps", 1e-6))
    def tensor_force_quant(self, name, new_name, bid, n_dims):
        del bid, name, n_dims  # unused
        if ".patch_embd." in new_name:
            return gguf.GGMLQuantizationType.F16
        if ".position_embd." in new_name:
            return gguf.GGMLQuantizationType.F32
        return False
    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused
        if name.startswith("visual."):
            # process visual tensors
            # split QKV tensors if needed
            if ".qkv." in name:
                if data_torch.ndim == 2:  # weight
                    c3, _ = data_torch.shape
                else:  # bias
                    c3 = data_torch.shape[0]
                assert c3 % 3 == 0
                c = c3 // 3
                wq = data_torch[:c]
                wk = data_torch[c: c * 2]
                wv = data_torch[c * 2:]
                return [
                    (self.map_tensor_name(name.replace("qkv", "q")), wq),
                    (self.map_tensor_name(name.replace("qkv", "k")), wk),
                    (self.map_tensor_name(name.replace("qkv", "v")), wv),
                ]
            elif 'patch_embed.proj.weight' in name:
                # split Conv3D into Conv2Ds
                c1, c2, kt, kh, kw = data_torch.shape
                del c1, c2, kh, kw  # unused
                assert kt == 2, "Current implementation only supports temporal_patch_size of 2"
                return [
                    (gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.V_ENC_EMBD_PATCH] + ".weight" , data_torch[:, :, 0, ...]),
                    (gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.V_ENC_EMBD_PATCH] + ".weight.1", data_torch[:, :, 1, ...]),
                ]
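                # the two temporal slices of the Conv3D kernel are stored as separate
                # Conv2D weights; applied to the two frames of a stack and summed,
                # they are equivalent to the original Conv3D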
            else:
                return [(self.map_tensor_name(name), data_torch)]
        return []  # skip other tensors
@ModelBase.register("WavTokenizerDec")
class WavTokenizerDecModel(TextModel):
    model_arch = gguf.MODEL_ARCH.WAVTOKENIZER_DEC

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        if \
                name.endswith("codebook.cluster_size") or \
                name.endswith("codebook.embed_avg") or \
                name.endswith("codebook.inited"):
            logger.debug(f"Skipping {name!r}")
            return []

        logger.info(f"{self.map_tensor_name(name)} -> {data_torch.shape}")

        return [(self.map_tensor_name(name), data_torch)]

    def set_vocab(self):
        self._set_vocab_none()

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_vocab_size         (self.hparams["vocab_size"])
        self.gguf_writer.add_features_length    (self.hparams["n_embd_features"])
        self.gguf_writer.add_feed_forward_length(self.hparams["n_ff"])
        self.gguf_writer.add_group_norm_eps     (self.hparams["group_norm_epsilon"])
        self.gguf_writer.add_group_norm_groups  (self.hparams["group_norm_groups"])

        self.gguf_writer.add_posnet_embedding_length(self.hparams["posnet"]["n_embd"])
        self.gguf_writer.add_posnet_block_count     (self.hparams["posnet"]["n_layer"])

        self.gguf_writer.add_convnext_embedding_length(self.hparams["convnext"]["n_embd"])
        self.gguf_writer.add_convnext_block_count     (self.hparams["convnext"]["n_layer"])

        self.gguf_writer.add_causal_attention(False)
@ModelBase.register("Qwen2MoeForCausalLM")
class Qwen2MoeModel(TextModel):
    model_arch = gguf.MODEL_ARCH.QWEN2MOE

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        if (n_experts := self.hparams.get("num_experts")) is not None:
            self.gguf_writer.add_expert_count(n_experts)
        if (moe_intermediate_size := self.hparams.get("moe_intermediate_size")) is not None:
            self.gguf_writer.add_expert_feed_forward_length(moe_intermediate_size)
            logger.info(f"gguf: expert feed forward length = {moe_intermediate_size}")
        if (shared_expert_intermediate_size := self.hparams.get('shared_expert_intermediate_size')) is not None:
            self.gguf_writer.add_expert_shared_feed_forward_length(shared_expert_intermediate_size)
            logger.info(f"gguf: expert shared feed forward length = {shared_expert_intermediate_size}")
        # YaRN is not enabled by default
        # To enable it, please refer to this guide: https://huggingface.co/Qwen/Qwen3-30B-A3B#processing-long-texts
        if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]:
            if self.hparams["rope_scaling"].get("type") == "yarn":
                self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
                self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"])
                self.gguf_writer.add_rope_scaling_orig_ctx_len(self.hparams["rope_scaling"]["original_max_position_embeddings"])
    _experts: list[dict[str, Tensor]] | None = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # process the experts separately
        if name.find("experts") != -1:
            n_experts = self.hparams["num_experts"]
            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for w_name in ["down_proj", "gate_proj", "up_proj"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)

                    merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"

                    new_name = self.map_tensor_name(merged_name)

                    tensors.append((new_name, data_torch))
                return tensors
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]
    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")
@ModelBase.register("Qwen3ForCausalLM")
class Qwen3Model(Qwen2Model):
    model_arch = gguf.MODEL_ARCH.QWEN3


@ModelBase.register("Qwen3MoeForCausalLM")
class Qwen3MoeModel(Qwen2MoeModel):
    model_arch = gguf.MODEL_ARCH.QWEN3MOE
@ModelBase.register("GPT2LMHeadModel")
class GPT2Model(TextModel):
    model_arch = gguf.MODEL_ARCH.GPT2

    def set_gguf_parameters(self):
        self.gguf_writer.add_block_count(self.hparams["n_layer"])
        self.gguf_writer.add_context_length(self.hparams["n_ctx"])
        self.gguf_writer.add_embedding_length(self.hparams["n_embd"])
        self.gguf_writer.add_feed_forward_length(4 * self.hparams["n_embd"])
        self.gguf_writer.add_head_count(self.hparams["n_head"])
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        tensors: list[tuple[str, Tensor]] = []

        # we don't need these
        if name.endswith((".attn.bias", ".attn.masked_bias")):
            return tensors

        if name.endswith((".c_attn.weight", ".c_proj.weight", ".c_fc.weight")):
            data_torch = data_torch.transpose(1, 0)
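            # HF GPT-2 implements these layers as Conv1D modules, whose weight is
            # the transpose of the equivalent nn.Linear weight, hence the transpose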
        new_name = self.map_tensor_name(name)

        tensors.append((new_name, data_torch))

        return tensors
@ModelBase.register("PhiForCausalLM")
class Phi2Model(TextModel):
    model_arch = gguf.MODEL_ARCH.PHI2

    def set_gguf_parameters(self):
        block_count = self.find_hparam(["num_hidden_layers", "n_layer"])

        rot_pct = self.find_hparam(["partial_rotary_factor"])
        n_embd = self.find_hparam(["hidden_size", "n_embd"])
        n_head = self.find_hparam(["num_attention_heads", "n_head"])

        self.gguf_writer.add_context_length(self.find_hparam(["n_positions", "max_position_embeddings"]))

        self.gguf_writer.add_embedding_length(n_embd)
        self.gguf_writer.add_feed_forward_length(4 * n_embd)
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_head_count(n_head)
        self.gguf_writer.add_head_count_kv(n_head)
        self.gguf_writer.add_layer_norm_eps(self.find_hparam(["layer_norm_epsilon", "layer_norm_eps"]))
        self.gguf_writer.add_rope_dimension_count(int(rot_pct * n_embd) // n_head)
        self.gguf_writer.add_file_type(self.ftype)
        self.gguf_writer.add_add_bos_token(False)
@ModelBase.register("Phi3ForCausalLM")
class Phi3MiniModel(TextModel):
    model_arch = gguf.MODEL_ARCH.PHI3

    def set_vocab(self):
        # Phi-4 model uses GPT2Tokenizer
        tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
        if tokenizer_config_file.is_file():
            with open(tokenizer_config_file, "r", encoding="utf-8") as f:
                tokenizer_config_json = json.load(f)
                tokenizer_class = tokenizer_config_json['tokenizer_class']
                if tokenizer_class == 'GPT2Tokenizer':
                    return self._set_vocab_gpt2()

        from sentencepiece import SentencePieceProcessor

        tokenizer_path = self.dir_model / 'tokenizer.model'

        if not tokenizer_path.is_file():
            raise ValueError(f'Error: Missing {tokenizer_path}')

        tokenizer = SentencePieceProcessor()
        tokenizer.LoadFromFile(str(tokenizer_path))

        vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())

        tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
        scores: list[float] = [-10000.0] * vocab_size
        toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size

        for token_id in range(tokenizer.vocab_size()):
            piece = tokenizer.IdToPiece(token_id)
            text = piece.encode("utf-8")
            score = tokenizer.GetScore(token_id)

            toktype = SentencePieceTokenTypes.NORMAL
            if tokenizer.IsUnknown(token_id):
                toktype = SentencePieceTokenTypes.UNKNOWN
            elif tokenizer.IsControl(token_id):
                toktype = SentencePieceTokenTypes.CONTROL
            elif tokenizer.IsUnused(token_id):
                toktype = SentencePieceTokenTypes.UNUSED
            elif tokenizer.IsByte(token_id):
                toktype = SentencePieceTokenTypes.BYTE

            tokens[token_id] = text
            scores[token_id] = score
            toktypes[token_id] = toktype

        added_tokens_file = self.dir_model / 'added_tokens.json'
        if added_tokens_file.is_file():
            with open(added_tokens_file, "r", encoding="utf-8") as f:
                added_tokens_json = json.load(f)

                for key in added_tokens_json:
                    token_id = added_tokens_json[key]
                    if token_id >= vocab_size:
                        logger.debug(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
                        continue

                    tokens[token_id] = key.encode("utf-8")
                    scores[token_id] = -1000.0
                    toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED

        tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
        if tokenizer_config_file.is_file():
            with open(tokenizer_config_file, "r", encoding="utf-8") as f:
                tokenizer_config_json = json.load(f)
                added_tokens_decoder = tokenizer_config_json.get("added_tokens_decoder", {})
                for token_id, foken_data in added_tokens_decoder.items():
                    token_id = int(token_id)
                    token = foken_data["content"].encode("utf-8")
                    if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
                        if tokens[token_id] != token:
                            logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token.decode("utf-8")!r}')
                    tokens[token_id] = token
                    scores[token_id] = -1000.0
                    toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
                    if foken_data.get("special"):
                        toktypes[token_id] = SentencePieceTokenTypes.CONTROL

        tokenizer_file = self.dir_model / 'tokenizer.json'
        if tokenizer_file.is_file():
            with open(tokenizer_file, "r", encoding="utf-8") as f:
                tokenizer_json = json.load(f)
                added_tokens = tokenizer_json.get("added_tokens", [])
                for foken_data in added_tokens:
                    token_id = int(foken_data["id"])
                    token = foken_data["content"].encode("utf-8")
                    if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
                        if tokens[token_id] != token:
                            logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token.decode("utf-8")!r}')
                    tokens[token_id] = token
                    scores[token_id] = -1000.0
                    toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
                    if foken_data.get("special"):
                        toktypes[token_id] = SentencePieceTokenTypes.CONTROL

        self.gguf_writer.add_tokenizer_model("llama")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)
    def set_gguf_parameters(self):
        block_count = self.find_hparam(["num_hidden_layers", "n_layer"])

        n_embd = self.find_hparam(["hidden_size", "n_embd"])
        n_head = self.find_hparam(["num_attention_heads", "n_head"])
        n_head_kv = self.find_hparam(["num_key_value_heads", "n_head_kv"])
        rms_eps = self.find_hparam(["rms_norm_eps"])
        max_pos_embds = self.find_hparam(["n_positions", "max_position_embeddings"])
        orig_max_pos_embds = self.find_hparam(["original_max_position_embeddings"])
        rot_pct = self.hparams.get("partial_rotary_factor", 1.0)
        rope_dims = int(rot_pct * n_embd) // n_head

        self.gguf_writer.add_context_length(max_pos_embds)
        self.gguf_writer.add_rope_scaling_orig_ctx_len(orig_max_pos_embds)
        self.gguf_writer.add_embedding_length(n_embd)
        self.gguf_writer.add_feed_forward_length(self.find_hparam(["intermediate_size"]))
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_head_count(n_head)
        self.gguf_writer.add_head_count_kv(n_head_kv)
        self.gguf_writer.add_layer_norm_rms_eps(rms_eps)
        self.gguf_writer.add_rope_dimension_count(rope_dims)
        self.gguf_writer.add_rope_freq_base(self.find_hparam(["rope_theta"]))
        self.gguf_writer.add_file_type(self.ftype)
        sliding_window = self.hparams.get("sliding_window")
        # use zero value of sliding_window to distinguish Phi-4 from other PHI3 models
        if sliding_window is None:
            sliding_window = 0
        self.gguf_writer.add_sliding_window(sliding_window)
    def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
        n_embd = self.find_hparam(["hidden_size", "n_embd"])
        n_head = self.find_hparam(["num_attention_heads", "n_head"])
        max_pos_embds = self.find_hparam(["n_positions", "max_position_embeddings"])
        orig_max_pos_embds = self.find_hparam(["original_max_position_embeddings"])
        rot_pct = self.hparams.get("partial_rotary_factor", 1.0)
        rope_dims = int(rot_pct * n_embd) // n_head

        # write rope scaling for long context (128k) model
        rope_scaling = self.find_hparam(['rope_scaling'], True)
        if rope_scaling is None:
            return

        scale = max_pos_embds / orig_max_pos_embds

        rope_scaling_type = rope_scaling.get('type', '').lower()
        if len(rope_scaling_type) == 0:
            raise KeyError('Missing the required key rope_scaling.type')

        if rope_scaling_type == 'su' or rope_scaling_type == 'longrope':
            attn_factor = math.sqrt(1 + math.log(scale) / math.log(orig_max_pos_embds)) if scale > 1.0 else 1.0
        elif rope_scaling_type == 'yarn':
            attn_factor = 0.1 * math.log(scale) + 1.0 if scale > 1.0 else 1.0
        else:
            raise NotImplementedError(f'The rope scaling type {rope_scaling_type} is not supported yet')

        self.gguf_writer.add_rope_scaling_attn_factors(attn_factor)
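        # Worked example (illustrative only): a model extended from 4096 to
        # 131072 positions has scale = 32, so 'su'/'longrope' scaling stores
        # attn_factor = sqrt(1 + ln(32) / ln(4096)) ~= 1.19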
        long_factors = rope_scaling.get('long_factor', None)
        short_factors = rope_scaling.get('short_factor', None)

        if long_factors is None or short_factors is None:
            raise KeyError('Missing the required key rope_scaling.long_factor or rope_scaling.short_factor')

        if len(long_factors) != len(short_factors) or len(long_factors) != rope_dims / 2:
            raise ValueError(f'The length of rope long and short factors must be {rope_dims / 2}. long_factors = {len(long_factors)}, short_factors = {len(short_factors)}.')

        yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FACTORS_LONG), torch.tensor(long_factors, dtype=torch.float32))
        yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FACTORS_SHORT), torch.tensor(short_factors, dtype=torch.float32))
@ModelBase.register("PhiMoEForCausalLM")
class PhiMoeModel(Phi3MiniModel):
    model_arch = gguf.MODEL_ARCH.PHIMOE

    _experts: list[dict[str, Tensor]] | None = None

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_expert_used_count(self.hparams["num_experts_per_tok"])
        self.gguf_writer.add_expert_count(self.hparams["num_local_experts"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # process the experts separately
        if name.find("block_sparse_moe.experts") != -1:
            n_experts = self.hparams["num_local_experts"]
            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for w_name in ["w1", "w2", "w3"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.{w_name}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)
                    merged_name = f"model.layers.{bid}.block_sparse_moe.experts.{w_name}.weight"
                    new_name = self.map_tensor_name(merged_name)
                    tensors.append((new_name, data_torch))
                return tensors
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()
        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")
@ModelBase.register("PlamoForCausalLM")
class PlamoModel(TextModel):
    model_arch = gguf.MODEL_ARCH.PLAMO

    def set_vocab(self):
        self._set_vocab_sentencepiece()

    def set_gguf_parameters(self):
        hparams = self.hparams
        block_count = hparams["num_hidden_layers"]

        self.gguf_writer.add_context_length(4096)  # not in config.json
        self.gguf_writer.add_embedding_length(hparams["hidden_size"])
        self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_head_count(hparams["num_attention_heads"])
        self.gguf_writer.add_head_count_kv(5)  # hparams["num_key_value_heads"] is wrong
        self.gguf_writer.add_layer_norm_rms_eps(hparams["rms_norm_eps"])
        self.gguf_writer.add_file_type(self.ftype)

    def shuffle_attn_q_weight(self, data_torch):
        assert data_torch.size() == (5120, 5120)
        data_torch = data_torch.reshape(8, 5, 128, 5120)
        data_torch = torch.permute(data_torch, (1, 0, 2, 3))
        data_torch = torch.reshape(data_torch, (5120, 5120))
        return data_torch

    def shuffle_attn_output_weight(self, data_torch):
        assert data_torch.size() == (5120, 5120)
        data_torch = data_torch.reshape(5120, 8, 5, 128)
        data_torch = torch.permute(data_torch, (0, 2, 1, 3))
        data_torch = torch.reshape(data_torch, (5120, 5120))
        return data_torch

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        new_name = self.map_tensor_name(name)

        # shuffle for broadcasting of gqa in ggml_mul_mat
        if new_name.endswith("attn_q.weight"):
            data_torch = self.shuffle_attn_q_weight(data_torch)
        elif new_name.endswith("attn_output.weight"):
            data_torch = self.shuffle_attn_output_weight(data_torch)

        return [(new_name, data_torch)]
@ModelBase.register("CodeShellForCausalLM")
class CodeShellModel(TextModel):
    model_arch = gguf.MODEL_ARCH.CODESHELL

    def set_gguf_parameters(self):
        block_count = self.hparams["n_layer"]

        self.gguf_writer.add_context_length(self.hparams["n_positions"])
        self.gguf_writer.add_embedding_length(self.hparams["n_embd"])
        self.gguf_writer.add_feed_forward_length(4 * self.hparams["n_embd"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_head_count(self.hparams["n_head"])
        self.gguf_writer.add_head_count_kv(self.hparams["num_query_groups"])
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)
        self.gguf_writer.add_rope_freq_base(10000.0)
        self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
        self.gguf_writer.add_rope_scaling_factor(1.0)

    _has_tok_embd = False

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        output_name = self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT)
        tok_embd_name = self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD)

        new_name = self.map_tensor_name(name)

        # assuming token_embd.weight is seen before output.weight
        if not self._has_tok_embd and new_name == self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT):
            # even though the tensor file(s) does not contain the word embeddings they are still in the weight map
            if self.tensor_names and "transformer.wte.weight" in self.tensor_names:
                logger.debug(f"{tok_embd_name} not found before {output_name}, assuming they are tied")
                self.tensor_names.remove("transformer.wte.weight")
        elif new_name == tok_embd_name:
            self._has_tok_embd = True

        return [(new_name, data_torch)]
@ModelBase.register("InternLM2ForCausalLM")
class InternLM2Model(TextModel):
    model_arch = gguf.MODEL_ARCH.INTERNLM2

    def set_vocab(self):
        # (TODO): Is there a better way?
        # Copied from _set_vocab_sentencepiece; the only difference is that we treat the character
        # \x00 specially and convert it into an emoji character to prevent it from being mistakenly
        # recognized as an empty string in C++.
        from sentencepiece import SentencePieceProcessor
        from sentencepiece import sentencepiece_model_pb2 as model

        tokenizer_path = self.dir_model / 'tokenizer.model'

        tokens: list[bytes] = []
        scores: list[float] = []
        toktypes: list[int] = []

        if not tokenizer_path.is_file():
            logger.error(f'Error: Missing {tokenizer_path}')
            sys.exit(1)

        sentencepiece_model = model.ModelProto()  # pyright: ignore[reportAttributeAccessIssue]
        sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read())
        add_prefix = sentencepiece_model.normalizer_spec.add_dummy_prefix

        tokenizer = SentencePieceProcessor()
        tokenizer.LoadFromFile(str(tokenizer_path))

        vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())

        for token_id in range(vocab_size):
            piece = tokenizer.IdToPiece(token_id)
            text = piece.encode("utf-8")
            score = tokenizer.GetScore(token_id)
            if text == b"\x00":
                # (TODO): fixme
                # Hack here and replace the \x00 characters.
                logger.warning(f"InternLM2 converting token '{text}' to '🐉'!")
                text = "🐉".encode("utf-8")

            toktype = SentencePieceTokenTypes.NORMAL
            if tokenizer.IsUnknown(token_id):
                toktype = SentencePieceTokenTypes.UNKNOWN
            elif tokenizer.IsControl(token_id):
                toktype = SentencePieceTokenTypes.CONTROL
            elif tokenizer.IsUnused(token_id):
                toktype = SentencePieceTokenTypes.UNUSED
            elif tokenizer.IsByte(token_id):
                toktype = SentencePieceTokenTypes.BYTE
            # take care of unused raw token
            if piece.startswith('[UNUSED'):
                toktype = SentencePieceTokenTypes.UNUSED

            tokens.append(text)
            scores.append(score)
            toktypes.append(toktype)

        added_tokens_file = self.dir_model / 'added_tokens.json'
        if added_tokens_file.is_file():
            with open(added_tokens_file, "r", encoding="utf-8") as f:
                added_tokens_json = json.load(f)

                for key in added_tokens_json:
                    tokens.append(key.encode("utf-8"))
                    scores.append(-1000.0)
                    toktypes.append(SentencePieceTokenTypes.USER_DEFINED)
        chat_eos_token = '<|im_end|>'
        chat_eos_token_id = None

        tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
        if tokenizer_config_file.is_file():
            with open(tokenizer_config_file, "r", encoding="utf-8") as f:
                tokenizer_config_json = json.load(f)
                added_tokens_decoder = tokenizer_config_json.get("added_tokens_decoder", {})
                for token_id, foken_data in added_tokens_decoder.items():
                    token_id = int(token_id)
                    token = foken_data["content"]
                    if token == chat_eos_token:
                        chat_eos_token_id = token_id
                    token = token.encode("utf-8")
                    if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
                        if tokens[token_id] != token:
                            logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token.decode("utf-8")!r}')
                    tokens[token_id] = token
                    scores[token_id] = -1000.0
                    toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
                    if foken_data.get("special"):
                        toktypes[token_id] = SentencePieceTokenTypes.CONTROL

        tokenizer_file = self.dir_model / 'tokenizer.json'
        if tokenizer_file.is_file():
            with open(tokenizer_file, "r", encoding="utf-8") as f:
                tokenizer_json = json.load(f)
                added_tokens = tokenizer_json.get("added_tokens", [])
                for foken_data in added_tokens:
                    token_id = int(foken_data["id"])
                    token = foken_data["content"]
                    if token == chat_eos_token:
                        chat_eos_token_id = token_id
                    token = token.encode("utf-8")
                    if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
                        if tokens[token_id] != token:
                            logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token.decode("utf-8")!r}')
                    tokens[token_id] = token
                    scores[token_id] = -1000.0
                    toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
                    if foken_data.get("special"):
                        toktypes[token_id] = SentencePieceTokenTypes.CONTROL

        self.gguf_writer.add_tokenizer_model("llama")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)
        self.gguf_writer.add_add_space_prefix(add_prefix)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        old_eos = special_vocab.special_token_ids["eos"]
        if chat_eos_token_id is not None:
            # For the chat model, we replace the eos with '<|im_end|>'.
            # TODO: this is a hack, should be fixed
            # https://github.com/ggml-org/llama.cpp/pull/6745#issuecomment-2067687048
            special_vocab.special_token_ids["eos"] = chat_eos_token_id
            logger.warning(f"Replace eos:{old_eos} with a special token:{chat_eos_token_id}"
                           " in chat mode so that the conversation can end normally.")

        special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"])
        self.gguf_writer.add_block_count(self.hparams["num_hidden_layers"])
        self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
        self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
        self.gguf_writer.add_rope_freq_base(self.hparams["rope_theta"])
        self.gguf_writer.add_head_count(self.hparams["num_attention_heads"])
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
        self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"])
        self.gguf_writer.add_file_type(self.ftype)

        if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]:
            if self.hparams["rope_scaling"].get("type") == "linear":
                self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
                self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        num_heads = self.hparams["num_attention_heads"]
        num_kv_heads = self.hparams["num_key_value_heads"]
        n_embd = self.hparams["hidden_size"]
        q_per_kv = num_heads // num_kv_heads
        head_dim = n_embd // num_heads
        num_groups = num_heads // q_per_kv

        if bid is not None and f"model.layers.{bid}.attention.wqkv" in name:
            qkv = data_torch
            qkv = qkv.reshape((num_groups, q_per_kv + 2, head_dim, n_embd))
            q, k, v = qkv[:, : q_per_kv], qkv[:, -2], qkv[:, -1]

            # The model weights of q and k require additional reshaping.
            q = LlamaModel.permute(q.reshape((-1, q.shape[-1])), num_heads, num_heads)
            k = LlamaModel.permute(k.reshape((-1, k.shape[-1])), num_heads, num_kv_heads)
            v = v.reshape((-1, v.shape[-1]))

            return [
                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid), q),
                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid), k),
                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid), v),
            ]
        else:
            return [(self.map_tensor_name(name), data_torch)]
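
    # Worked example for the wqkv split above, with hypothetical hparams (illustration only):
    #   num_heads=32, num_kv_heads=8, n_embd=4096 -> q_per_kv=4, head_dim=128, num_groups=8
    #   wqkv has shape (num_groups * (q_per_kv + 2) * head_dim, n_embd) = (6144, 4096) and is
    #   reshaped to (8, 6, 128, 4096); rows 0..3 of each group are q, row 4 is k, row 5 is v,
    #   giving q: (4096, 4096), k: (1024, 4096), v: (1024, 4096) after the flattening reshapes.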
  2758. @ModelBase.register("InternLM3ForCausalLM")
  2759. class InternLM3Model(TextModel):
  2760. model_arch = gguf.MODEL_ARCH.LLAMA
  2761. def set_vocab(self):
  2762. tokens, scores, toktypes = self._create_vocab_sentencepiece()
  2763. self.gguf_writer.add_tokenizer_model("llama")
  2764. self.gguf_writer.add_tokenizer_pre("default")
  2765. self.gguf_writer.add_token_list(tokens)
  2766. self.gguf_writer.add_token_scores(scores)
  2767. self.gguf_writer.add_token_types(toktypes)
  2768. special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
  2769. tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
  2770. if tokenizer_config_file.is_file():
  2771. with open(tokenizer_config_file, "r", encoding="utf-8") as f:
  2772. tokenizer_config_json = json.load(f)
  2773. if "add_prefix_space" in tokenizer_config_json:
  2774. self.gguf_writer.add_add_space_prefix(tokenizer_config_json["add_prefix_space"])
  2775. if "added_tokens_decoder" in tokenizer_config_json:
  2776. for token_id, token_data in tokenizer_config_json["added_tokens_decoder"].items():
  2777. if token_data.get("special"):
  2778. token_id = int(token_id)
  2779. token = token_data["content"]
  2780. special_vocab._set_special_token(token, token_id)
  2781. # update eos token
  2782. if token == '<|im_end|>' and "eos" in special_vocab.special_token_ids:
  2783. special_vocab.special_token_ids["eos"] = token_id
  2784. special_vocab.add_to_gguf(self.gguf_writer)
  2785. def set_gguf_parameters(self):
  2786. super().set_gguf_parameters()
  2787. hparams = self.hparams
  2788. self.gguf_writer.add_vocab_size(hparams["vocab_size"])
  2789. if "head_dim" in hparams:
  2790. rope_dim = hparams["head_dim"]
  2791. else:
  2792. rope_dim = hparams["hidden_size"] // hparams["num_attention_heads"]
  2793. self.gguf_writer.add_rope_dimension_count(rope_dim)
  2794. if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]:
  2795. if self.hparams["rope_scaling"].get("type") == "linear" or self.hparams["rope_scaling"].get("rope_type") == "linear":
  2796. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
  2797. self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"])
  2798. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  2799. n_head = self.hparams["num_attention_heads"]
  2800. n_kv_head = self.hparams.get("num_key_value_heads")
  2801. if name.endswith(("q_proj.weight", "q_proj.bias")):
  2802. data_torch = LlamaModel.permute(data_torch, n_head, n_head)
  2803. if name.endswith(("k_proj.weight", "k_proj.bias")):
  2804. data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)
  2805. return [(self.map_tensor_name(name), data_torch)]
  2806. @ModelBase.register("BertModel", "BertForMaskedLM", "CamembertModel")
  2807. class BertModel(TextModel):
  2808. model_arch = gguf.MODEL_ARCH.BERT
  2809. def __init__(self, *args, **kwargs):
  2810. super().__init__(*args, **kwargs)
  2811. self.vocab_size = None
  2812. def set_gguf_parameters(self):
  2813. super().set_gguf_parameters()
  2814. self.gguf_writer.add_causal_attention(False)
  2815. self._try_set_pooling_type()
  2816. def set_vocab(self):
  2817. tokens, toktypes, tokpre = self.get_vocab_base()
  2818. self.vocab_size = len(tokens)
  2819. # we need this to validate the size of the token_type embeddings
  2820. # though currently we are passing all zeros to the token_type embeddings
  2821. # "Sequence A" or "Sequence B"
  2822. self.gguf_writer.add_token_type_count(self.hparams.get("type_vocab_size", 1))
  2823. # convert to phantom space vocab
  2824. def phantom(tok):
  2825. if tok.startswith("[") and tok.endswith("]"):
  2826. return tok
  2827. if tok.startswith("##"):
  2828. return tok[2:]
  2829. return "\u2581" + tok
  2830. tokens = list(map(phantom, tokens))
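        # e.g. (WordPiece -> phantom-space): "hello" -> "\u2581hello", "##ing" -> "ing", "[CLS]" -> "[CLS]"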

        # add vocab to gguf
        self.gguf_writer.add_tokenizer_model("bert")
        self.gguf_writer.add_tokenizer_pre(tokpre)
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)

        # handle special tokens
        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        if name.startswith("bert."):
            name = name[5:]

        if name.endswith(".gamma"):
            name = name[:-6] + ".weight"

        if name.endswith(".beta"):
            name = name[:-5] + ".bias"

        # we are only using BERT for embeddings so we don't need the pooling layer
        if name in ("embeddings.position_ids", "pooler.dense.weight", "pooler.dense.bias"):
            return []  # we don't need these

        if name.startswith("cls.predictions"):
            return []

        if name.startswith("cls.seq_relationship"):
            return []

        return [(self.map_tensor_name(name), data_torch)]

    def _xlmroberta_tokenizer_init(self) -> None:
        # we need the pad_token_id to know how to chop down position_embd matrix
        if (pad_token_id := self.hparams.get("pad_token_id")) is not None:
            self._position_offset = 1 + pad_token_id
            if "max_position_embeddings" in self.hparams:
                self.hparams["max_position_embeddings"] -= self._position_offset
        else:
            self._position_offset = None

    def _xlmroberta_set_vocab(self) -> None:
        # to avoid TypeError: Descriptors cannot be created directly
        # exception when importing sentencepiece_model_pb2
        os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
        from sentencepiece import SentencePieceProcessor
        from sentencepiece import sentencepiece_model_pb2 as model

        tokenizer_path = self.dir_model / 'sentencepiece.bpe.model'
        if not tokenizer_path.is_file():
            raise FileNotFoundError(f"File not found: {tokenizer_path}")

        sentencepiece_model = model.ModelProto()  # pyright: ignore[reportAttributeAccessIssue]
        sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read())
        assert sentencepiece_model.trainer_spec.model_type == 1  # UNIGRAM

        add_prefix = sentencepiece_model.normalizer_spec.add_dummy_prefix
        remove_whitespaces = sentencepiece_model.normalizer_spec.remove_extra_whitespaces
        precompiled_charsmap = sentencepiece_model.normalizer_spec.precompiled_charsmap

        tokenizer = SentencePieceProcessor()
        tokenizer.LoadFromFile(str(tokenizer_path))

        vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())

        tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
        scores: list[float] = [-10000.0] * vocab_size
        toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size

        for token_id in range(tokenizer.vocab_size()):
            piece = tokenizer.IdToPiece(token_id)
            text = piece.encode("utf-8")
            score = tokenizer.GetScore(token_id)

            toktype = SentencePieceTokenTypes.NORMAL
            if tokenizer.IsUnknown(token_id):
                toktype = SentencePieceTokenTypes.UNKNOWN
            elif tokenizer.IsControl(token_id):
                toktype = SentencePieceTokenTypes.CONTROL
            elif tokenizer.IsUnused(token_id):
                toktype = SentencePieceTokenTypes.UNUSED
            elif tokenizer.IsByte(token_id):
                toktype = SentencePieceTokenTypes.BYTE

            tokens[token_id] = text
            scores[token_id] = score
            toktypes[token_id] = toktype

        if vocab_size > len(tokens):
            pad_count = vocab_size - len(tokens)
            logger.debug(f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]")
            for i in range(1, pad_count + 1):
                tokens.append(bytes(f"[PAD{i}]", encoding="utf-8"))
                scores.append(-1000.0)
                toktypes.append(SentencePieceTokenTypes.UNUSED)

        # realign tokens (see HF tokenizer code)
        tokens = [b'<s>', b'<pad>', b'</s>', b'<unk>'] + tokens[3:-1]
        scores = [0.0, 0.0, 0.0, 0.0] + scores[3:-1]
        toktypes = [
            SentencePieceTokenTypes.CONTROL,
            SentencePieceTokenTypes.CONTROL,
            SentencePieceTokenTypes.CONTROL,
            SentencePieceTokenTypes.UNKNOWN,
        ] + toktypes[3:-1]
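        # After realignment, ids 0..3 are <s>, <pad>, </s>, <unk> (the fairseq-style layout used
        # by the HF XLM-R tokenizer); SPM pieces from id 3 onward shift up by one and the last
        # piece is dropped, so the total vocab size is unchanged.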
  2916. self.gguf_writer.add_tokenizer_model("t5")
  2917. self.gguf_writer.add_tokenizer_pre("default")
  2918. self.gguf_writer.add_token_list(tokens)
  2919. self.gguf_writer.add_token_scores(scores)
  2920. self.gguf_writer.add_token_types(toktypes)
  2921. self.gguf_writer.add_add_space_prefix(add_prefix)
  2922. self.gguf_writer.add_token_type_count(self.hparams.get("type_vocab_size", 1))
  2923. self.gguf_writer.add_remove_extra_whitespaces(remove_whitespaces)
  2924. if precompiled_charsmap:
  2925. self.gguf_writer.add_precompiled_charsmap(precompiled_charsmap)
  2926. special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
  2927. special_vocab.add_to_gguf(self.gguf_writer)
  2928. self.gguf_writer.add_add_bos_token(True)
  2929. self.gguf_writer.add_add_eos_token(True)
  2930. @ModelBase.register("RobertaModel")
  2931. class RobertaModel(BertModel):
  2932. model_arch = gguf.MODEL_ARCH.BERT
  2933. def __init__(self, *args, **kwargs):
  2934. super().__init__(*args, **kwargs)
  2935. # we need the pad_token_id to know how to chop down position_embd matrix
  2936. if (pad_token_id := self.hparams.get("pad_token_id")) is not None:
  2937. self._position_offset = 1 + pad_token_id
  2938. if "max_position_embeddings" in self.hparams:
  2939. self.hparams["max_position_embeddings"] -= self._position_offset
  2940. else:
  2941. self._position_offset = None
  2942. def set_vocab(self):
  2943. """Support BPE tokenizers for roberta models"""
  2944. bpe_tok_path = self.dir_model / "tokenizer.json"
  2945. if bpe_tok_path.exists():
  2946. self._set_vocab_gpt2()
  2947. self.gguf_writer.add_add_bos_token(True)
  2948. self.gguf_writer.add_add_eos_token(True)
  2949. # we need this to validate the size of the token_type embeddings
  2950. # though currently we are passing all zeros to the token_type embeddings
  2951. # "Sequence A" or "Sequence B"
  2952. self.gguf_writer.add_token_type_count(self.hparams.get("type_vocab_size", 1))
  2953. else:
  2954. return super().set_vocab()
  2955. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  2956. # if name starts with "roberta.", remove the prefix
  2957. # e.g. https://huggingface.co/BAAI/bge-reranker-v2-m3/tree/main
  2958. if name.startswith("roberta."):
  2959. name = name[8:]
  2960. # position embeddings start at pad_token_id + 1, so just chop down the weight tensor
  2961. if name == "embeddings.position_embeddings.weight":
  2962. if self._position_offset is not None:
  2963. data_torch = data_torch[self._position_offset:,:]
  2964. return super().modify_tensors(data_torch, name, bid)
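
    # Worked example for the chop above, with typical RoBERTa values (an assumption, not read
    # from any config here): pad_token_id=1 -> _position_offset=2, so a (514, 768) position
    # embedding matrix becomes (512, 768) and row 0 then corresponds to position 0.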
  2965. @ModelBase.register("NomicBertModel")
  2966. class NomicBertModel(BertModel):
  2967. model_arch = gguf.MODEL_ARCH.BERT
  2968. def __init__(self, dir_model: Path, ftype: gguf.LlamaFileType, fname_out: Path, **kwargs: Any):
  2969. hparams = kwargs.pop("hparams", None)
  2970. if hparams is None:
  2971. hparams = ModelBase.load_hparams(dir_model)
  2972. self.is_moe = bool(hparams.get("moe_every_n_layers"))
  2973. self.model_arch = gguf.MODEL_ARCH.NOMIC_BERT_MOE if self.is_moe else gguf.MODEL_ARCH.NOMIC_BERT
  2974. super().__init__(dir_model, ftype, fname_out, hparams=hparams, **kwargs)
  2975. self._tokenizer_is_xlmroberta = self._is_tokenizer_xlmroberta()
  2976. if self._tokenizer_is_xlmroberta:
  2977. self._xlmroberta_tokenizer_init()
  2978. npos, mtp = self.hparams["n_positions"], self.hparams.get("max_trained_positions", 2048)
  2979. if npos == 8192 and mtp == 2048:
  2980. self.hparams["n_positions"] = 2048 # nomic-embed-text v1 and v1.5 are trained for 2048 tokens.
  2981. elif npos == 2048 and mtp == 2048:
  2982. self.hparams["n_positions"] = 512 # nomic-embed-text-v2-moe is trained for 512 tokens.
  2983. else:
  2984. raise ValueError(f"unrecognized parameters: n_positions={npos}, max_trained_positions={mtp}")
  2985. assert self.hparams["activation_function"] == "gelu" if self.is_moe else "swiglu"
  2986. # this doesn't do anything in the HF version
  2987. assert self.hparams["causal"] is False
  2988. # no bias tensors unless MoE
  2989. assert self.hparams["qkv_proj_bias"] == self.is_moe
  2990. assert self.hparams["mlp_fc1_bias"] == self.is_moe
  2991. assert self.hparams["mlp_fc2_bias"] == self.is_moe
  2992. # norm at end of layer
  2993. assert self.hparams["prenorm"] is False
  2994. # standard RoPE
  2995. assert self.hparams["rotary_emb_fraction"] == 1.0
  2996. assert self.hparams["rotary_emb_interleaved"] is False
  2997. assert self.hparams["rotary_emb_scale_base"] is None
  2998. def set_vocab(self) -> None:
  2999. if self._tokenizer_is_xlmroberta:
  3000. return self._xlmroberta_set_vocab()
  3001. return super().set_vocab()
  3002. def modify_tensors(self, data_torch: torch.Tensor, name: str, bid: int | None) -> Iterable[tuple[str, torch.Tensor]]:
  3003. # If the tensor is an experts bias tensor, skip it by returning an empty list.
  3004. if "mlp.experts.bias" in name:
  3005. return [] # Explicitly return an empty list.
  3006. if "mlp.experts.mlp.w1" in name:
  3007. data_torch = data_torch.view(self.hparams["num_experts"], self.hparams["n_inner"], self.hparams["n_embd"])
  3008. name += ".weight"
  3009. if "mlp.experts.mlp.w2" in name:
  3010. data_torch = data_torch.view(self.hparams["num_experts"], self.hparams["n_inner"], self.hparams["n_embd"])
  3011. data_torch = data_torch.transpose(1, 2)
  3012. name += ".weight"
  3013. return [(self.map_tensor_name(name), data_torch)]
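
    # Shape sketch for the expert reshapes above (hypothetical dims, for illustration only):
    #   num_experts=8, n_inner=3072, n_embd=768 -> a flat w1 of 8*3072*768 elements is viewed
    #   as (8, 3072, 768); w2 is viewed the same way and then transposed to (8, 768, 3072).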

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_rope_freq_base(self.hparams["rotary_emb_base"])
        if self.is_moe:
            self.gguf_writer.add_moe_every_n_layers(self.hparams["moe_every_n_layers"])
            self.gguf_writer.add_expert_count(self.hparams["num_experts"])
            self.gguf_writer.add_expert_used_count(self.hparams["moe_top_k"])

    def _is_tokenizer_xlmroberta(self) -> bool:
        with open(self.dir_model / "tokenizer.json") as f:
            tokenizer_json = json.load(f)
        toktyp = tokenizer_json["model"]["type"]
        if toktyp == "Unigram":
            return True
        if toktyp == "WordPiece":
            return False
        raise ValueError(f"unknown tokenizer: {toktyp}")
  3030. @ModelBase.register("XLMRobertaModel", "XLMRobertaForSequenceClassification")
  3031. class XLMRobertaModel(BertModel):
  3032. model_arch = gguf.MODEL_ARCH.BERT
  3033. def __init__(self, *args, **kwargs):
  3034. super().__init__(*args, **kwargs)
  3035. self._xlmroberta_tokenizer_init()
  3036. def set_vocab(self):
  3037. self._xlmroberta_set_vocab()
  3038. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  3039. # if name starts with "roberta.", remove the prefix
  3040. # e.g. https://huggingface.co/BAAI/bge-reranker-v2-m3/tree/main
  3041. if name.startswith("roberta."):
  3042. name = name[8:]
  3043. # position embeddings start at pad_token_id + 1, so just chop down the weight tensor
  3044. if name == "embeddings.position_embeddings.weight":
  3045. if self._position_offset is not None:
  3046. data_torch = data_torch[self._position_offset:,:]
  3047. return super().modify_tensors(data_torch, name, bid)
  3048. @ModelBase.register("GemmaForCausalLM")
  3049. class GemmaModel(TextModel):
  3050. model_arch = gguf.MODEL_ARCH.GEMMA
  3051. def set_vocab(self):
  3052. self._set_vocab_sentencepiece()
  3053. # TODO: these special tokens should be exported only for the CodeGemma family
  3054. special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=False,
  3055. special_token_types = ['prefix', 'suffix', 'middle', 'fsep', 'eot'])
  3056. special_vocab._set_special_token("prefix", 67)
  3057. special_vocab._set_special_token("suffix", 69)
  3058. special_vocab._set_special_token("middle", 68)
  3059. special_vocab._set_special_token("fsep", 70)
  3060. special_vocab._set_special_token("eot", 107)
  3061. special_vocab.chat_template = None # do not add it twice
  3062. special_vocab.add_to_gguf(self.gguf_writer)
  3063. self.gguf_writer.add_add_space_prefix(False)
  3064. def set_gguf_parameters(self):
  3065. hparams = self.hparams
  3066. block_count = hparams["num_hidden_layers"]
  3067. self.gguf_writer.add_context_length(hparams["max_position_embeddings"])
  3068. self.gguf_writer.add_embedding_length(hparams["hidden_size"])
  3069. self.gguf_writer.add_block_count(block_count)
  3070. self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
  3071. self.gguf_writer.add_head_count(hparams["num_attention_heads"])
  3072. self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"] if "num_key_value_heads" in hparams else hparams["num_attention_heads"])
  3073. self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
  3074. self.gguf_writer.add_key_length(hparams["head_dim"])
  3075. self.gguf_writer.add_value_length(hparams["head_dim"])
  3076. self.gguf_writer.add_file_type(self.ftype)
  3077. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  3078. del bid # unused
  3079. # lm_head is not used in llama.cpp, while autoawq will include this tensor in model
  3080. # To prevent errors, skip loading lm_head.weight.
  3081. if name == "lm_head.weight":
  3082. logger.debug(f"Skipping get tensor {name!r} in safetensors so that convert can end normally.")
  3083. return []
  3084. # ref: https://github.com/huggingface/transformers/blob/fc37f38915372c15992b540dfcbbe00a916d4fc6/src/transformers/models/gemma/modeling_gemma.py#L89
  3085. if name.endswith("norm.weight"):
  3086. data_torch = data_torch + 1
  3087. return [(self.map_tensor_name(name), data_torch)]
  3088. @ModelBase.register("Gemma2ForCausalLM")
  3089. class Gemma2Model(TextModel):
  3090. model_arch = gguf.MODEL_ARCH.GEMMA2
  3091. def set_vocab(self):
  3092. self._set_vocab_sentencepiece()
  3093. self.gguf_writer.add_add_space_prefix(False)
  3094. def set_gguf_parameters(self):
  3095. hparams = self.hparams
  3096. block_count = hparams["num_hidden_layers"]
  3097. self.gguf_writer.add_context_length(hparams["max_position_embeddings"])
  3098. self.gguf_writer.add_embedding_length(hparams["hidden_size"])
  3099. self.gguf_writer.add_block_count(block_count)
  3100. self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
  3101. self.gguf_writer.add_head_count(hparams["num_attention_heads"])
  3102. self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"] if "num_key_value_heads" in hparams else hparams["num_attention_heads"])
  3103. self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
  3104. self.gguf_writer.add_key_length(hparams["head_dim"])
  3105. self.gguf_writer.add_value_length(hparams["head_dim"])
  3106. self.gguf_writer.add_file_type(self.ftype)
  3107. self.gguf_writer.add_attn_logit_softcapping(
  3108. self.hparams["attn_logit_softcapping"]
  3109. )
  3110. self.gguf_writer.add_final_logit_softcapping(
  3111. self.hparams["final_logit_softcapping"]
  3112. )
  3113. self.gguf_writer.add_sliding_window(self.hparams["sliding_window"])
  3114. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  3115. del bid # unused
  3116. # lm_head is not used in llama.cpp, while autoawq will include this tensor in model
  3117. # To prevent errors, skip loading lm_head.weight.
  3118. if name == "lm_head.weight":
  3119. logger.debug(f"Skipping get tensor {name!r} in safetensors so that convert can end normally.")
  3120. return []
  3121. # ref: https://github.com/huggingface/transformers/blob/fc37f38915372c15992b540dfcbbe00a916d4fc6/src/transformers/models/gemma/modeling_gemma.py#L89
  3122. if name.endswith("norm.weight"):
  3123. data_torch = data_torch + 1
  3124. return [(self.map_tensor_name(name), data_torch)]
  3125. @ModelBase.register("Gemma3ForCausalLM", "Gemma3ForConditionalGeneration")
  3126. class Gemma3Model(TextModel):
  3127. model_arch = gguf.MODEL_ARCH.GEMMA3
  3128. def set_vocab(self):
  3129. self._set_vocab_sentencepiece()
  3130. self.gguf_writer.add_add_space_prefix(False)
  3131. def set_gguf_parameters(self):
  3132. hparams = self.hparams
  3133. block_count = hparams["num_hidden_layers"]
  3134. # some default values are not specified in the hparams
  3135. self.gguf_writer.add_context_length(hparams.get("max_position_embeddings", 131072))
  3136. self.gguf_writer.add_embedding_length(hparams["hidden_size"])
  3137. self.gguf_writer.add_block_count(block_count)
  3138. self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
  3139. self.gguf_writer.add_head_count(hparams.get("num_attention_heads", 8))
  3140. self.gguf_writer.add_layer_norm_rms_eps(self.hparams.get("rms_norm_eps", 1e-6))
  3141. self.gguf_writer.add_key_length(hparams.get("head_dim", 256))
  3142. self.gguf_writer.add_value_length(hparams.get("head_dim", 256))
  3143. self.gguf_writer.add_file_type(self.ftype)
  3144. self.gguf_writer.add_rope_freq_base(hparams.get("rope_theta", 1_000_000.0)) # for global layers
  3145. # both attn_logit_softcapping and final_logit_softcapping are removed in Gemma3
  3146. assert hparams.get("attn_logit_softcapping") is None
  3147. assert hparams.get("final_logit_softcapping") is None
  3148. self.gguf_writer.add_sliding_window(hparams["sliding_window"])
  3149. self.gguf_writer.add_head_count_kv(hparams.get("num_key_value_heads", 4))
  3150. if hparams.get("rope_scaling") is not None:
  3151. assert hparams["rope_scaling"]["rope_type"] == "linear"
  3152. # important: this rope_scaling is only applied for global layers, and not used by 1B model
  3153. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
  3154. self.gguf_writer.add_rope_scaling_factor(hparams["rope_scaling"]["factor"])
  3155. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  3156. del bid # unused
  3157. if name.startswith("language_model."):
  3158. name = name.replace("language_model.", "")
  3159. elif name.startswith("multi_modal_projector.") or name.startswith("vision_tower.") \
  3160. or name.startswith("multimodal_projector.") or name.startswith("vision_model."):
  3161. return [] # skip vision tensors
  3162. # remove OOV (out-of-vocabulary) rows in token_embd
  3163. if "embed_tokens.weight" in name:
  3164. vocab = self._create_vocab_sentencepiece()
  3165. tokens = vocab[0]
  3166. data_torch = data_torch[:len(tokens)]
  3167. # ref code in Gemma3RMSNorm
  3168. # output = output * (1.0 + self.weight.float())
  3169. if name.endswith("norm.weight"):
  3170. data_torch = data_torch + 1
  3171. return [(self.map_tensor_name(name), data_torch)]
  3172. @ModelBase.register("Gemma3ForConditionalGeneration")
  3173. class Gemma3VisionModel(VisionModel):
  3174. def set_gguf_parameters(self):
  3175. super().set_gguf_parameters()
  3176. hparams = self.hparams
  3177. self.gguf_writer.add_vision_projector_type(gguf.VisionProjectorType.GEMMA3)
  3178. # default values below are taken from HF tranformers code
  3179. self.gguf_writer.add_vision_attention_layernorm_eps(hparams.get("layer_norm_eps", 1e-6))
  3180. self.gguf_writer.add_vision_use_gelu(True)
  3181. # calculate proj_scale_factor (used by tinygemma3 test model)
  3182. image_seq_length = self.preprocessor_config.get("image_seq_length", 256)
  3183. n_per_side = int(image_seq_length ** 0.5)
  3184. image_size = self.hparams["image_size"]
  3185. patch_size = self.hparams["patch_size"]
  3186. proj_scale_factor = (image_size // patch_size) // n_per_side
  3187. if proj_scale_factor > 0 and proj_scale_factor != 4:
  3188. # we only need to write this if it's not the default value
  3189. # in this case, we are converting a test model
  3190. self.gguf_writer.add_vision_projector_scale_factor(proj_scale_factor)
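        # Worked example, assuming the vision config of the released Gemma 3 models
        # (image_size=896, patch_size=14, image_seq_length=256): n_per_side=16 and
        # 896 // 14 = 64 patches per side, so proj_scale_factor = 64 // 16 = 4,
        # i.e. the default, and nothing is written.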

    def tensor_force_quant(self, name, new_name, bid, n_dims):
        del bid, new_name, n_dims  # unused
        # related to https://github.com/ggml-org/llama.cpp/issues/13025
        if "input_projection" in name:
            return gguf.GGMLQuantizationType.F16
        if ".embeddings." in name:
            return gguf.GGMLQuantizationType.F32
        return False

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        if "vision_model.head." in name:
            return []  # skip redundant tensors for tinygemma3

        if name.startswith("multi_modal_projector.") or name.startswith("vision_tower.") \
                or name.startswith("multimodal_projector.") or name.startswith("vision_model."):
            # process vision tensors
            name = name.replace("_weight", ".weight")

            # correct norm value; only "soft_emb_norm" needs to be corrected, as it is part of the Gemma projector
            # the other norm values are part of the SigLIP model and are already correct
            # ref code: Gemma3RMSNorm
            if "soft_emb_norm.weight" in name:
                logger.info(f"Correcting norm value for '{name}'")
                data_torch = data_torch + 1

            return [(self.map_tensor_name(name), data_torch)]

        return []  # skip other tensors
  3215. @ModelBase.register("Starcoder2ForCausalLM")
  3216. class StarCoder2Model(TextModel):
  3217. model_arch = gguf.MODEL_ARCH.STARCODER2
  3218. @ModelBase.register("Rwkv6ForCausalLM")
  3219. class Rwkv6Model(TextModel):
  3220. model_arch = gguf.MODEL_ARCH.RWKV6
  3221. def set_vocab(self):
  3222. self._set_vocab_rwkv_world()
  3223. def set_gguf_parameters(self):
  3224. block_count = self.hparams["num_hidden_layers"]
  3225. head_size = self.hparams["head_size"]
  3226. hidden_size = self.hparams["hidden_size"]
  3227. layer_norm_eps = self.hparams["layer_norm_epsilon"]
  3228. rescale_every_n_layers = self.hparams["rescale_every"]
  3229. intermediate_size = self.hparams["intermediate_size"] if self.hparams["intermediate_size"] is not None else int((hidden_size * 3.5) // 32 * 32)
  3230. time_mix_extra_dim = 64 if hidden_size == 4096 else 32
  3231. time_decay_extra_dim = 128 if hidden_size == 4096 else 64
  3232. # RWKV isn't context limited
  3233. self.gguf_writer.add_context_length(1048576)
  3234. self.gguf_writer.add_embedding_length(hidden_size)
  3235. self.gguf_writer.add_block_count(block_count)
  3236. self.gguf_writer.add_layer_norm_eps(layer_norm_eps)
  3237. self.gguf_writer.add_rescale_every_n_layers(rescale_every_n_layers)
  3238. self.gguf_writer.add_wkv_head_size(head_size)
  3239. self.gguf_writer.add_time_mix_extra_dim(time_mix_extra_dim)
  3240. self.gguf_writer.add_time_decay_extra_dim(time_decay_extra_dim)
  3241. self.gguf_writer.add_feed_forward_length(intermediate_size)
  3242. self.gguf_writer.add_file_type(self.ftype)
  3243. # required by llama.cpp, unused
  3244. self.gguf_writer.add_head_count(0)

    lerp_weights: dict[int, dict[str, Tensor]] = {}

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        new_name = self.map_tensor_name(name)

        if not (new_name.endswith(".weight") or new_name.endswith(".bias")):
            new_name += ".weight"

        if new_name.endswith("time_mix_w1.weight") or new_name.endswith("time_mix_decay_w1.weight") or new_name.endswith("time_mix_decay_w2.weight"):
            data_torch = data_torch.transpose(0, 1)

        if new_name.endswith("time_mix_w2.weight"):
            data_torch = data_torch.permute(0, 2, 1)

        if new_name.endswith("time_mix_decay.weight") or "lerp" in new_name:
            data_torch = data_torch.squeeze()

        try:
            rescale_every_n_layers = self.hparams["rescale_every"]
            if rescale_every_n_layers > 0:
                if new_name.endswith("time_mix_output.weight") or new_name.endswith("channel_mix_value.weight"):
                    data_torch = data_torch.div_(2 ** int(bid // rescale_every_n_layers))
        except KeyError:
            pass

        # concat time_mix_lerp weights to reduce some cpu overhead
        # also reduces the number of tensors in the model
        if bid is not None and "time_mix_lerp" in new_name and "time_mix_lerp_x" not in new_name:
            try:
                self.lerp_weights[bid][new_name] = data_torch
            except KeyError:
                self.lerp_weights[bid] = {new_name: data_torch}
            if all(f"blk.{bid}.time_mix_lerp_{i}.weight" in self.lerp_weights[bid].keys() for i in ["w", "k", "v", "r", "g"]):
                new_name = f"blk.{bid}.time_mix_lerp_fused.weight"
                data = torch.stack([self.lerp_weights[bid][f"blk.{bid}.time_mix_lerp_{i}.weight"].unsqueeze(0) for i in ["w", "k", "v", "r", "g"]], dim=0).unsqueeze(1)
                yield (new_name, data)
            return

        yield (new_name, data_torch)
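
    # Shape sketch for the fused lerp tensor above (assuming each squeezed lerp weight is a
    # 1-D tensor of size n_embd): unsqueeze(0) makes each one (1, n_embd), stacking five of
    # them gives (5, 1, n_embd), and the final unsqueeze(1) yields (5, 1, 1, n_embd).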
  3276. @ModelBase.register("RWKV6Qwen2ForCausalLM")
  3277. class RWKV6Qwen2Model(Rwkv6Model):
  3278. model_arch = gguf.MODEL_ARCH.RWKV6QWEN2
  3279. def set_vocab(self):
  3280. try:
  3281. self._set_vocab_sentencepiece()
  3282. except FileNotFoundError:
  3283. self._set_vocab_gpt2()
  3284. def set_gguf_parameters(self):
  3285. block_count = self.hparams["num_hidden_layers"]
  3286. num_attention_heads = self.hparams["num_attention_heads"]
  3287. num_key_value_heads = self.hparams["num_key_value_heads"]
  3288. hidden_size = self.hparams["hidden_size"]
  3289. head_size = hidden_size // num_attention_heads
  3290. rms_norm_eps = self.hparams["rms_norm_eps"]
  3291. intermediate_size = self.hparams["intermediate_size"]
  3292. time_mix_extra_dim = self.hparams.get("lora_rank_tokenshift", 64 if hidden_size >= 4096 else 32)
  3293. time_decay_extra_dim = self.hparams.get("lora_rank_decay", 128 if hidden_size >= 4096 else 64)
  3294. # RWKV isn't context limited
  3295. self.gguf_writer.add_context_length(1048576)
  3296. self.gguf_writer.add_embedding_length(hidden_size)
  3297. self.gguf_writer.add_block_count(block_count)
  3298. self.gguf_writer.add_wkv_head_size(head_size)
  3299. self.gguf_writer.add_time_mix_extra_dim(time_mix_extra_dim)
  3300. self.gguf_writer.add_time_decay_extra_dim(time_decay_extra_dim)
  3301. self.gguf_writer.add_feed_forward_length(intermediate_size)
  3302. self.gguf_writer.add_file_type(self.ftype)
  3303. # special parameters for time_mixing in RWKV6QWEN2
  3304. self.gguf_writer.add_layer_norm_rms_eps(rms_norm_eps)
  3305. self.gguf_writer.add_token_shift_count(1)
  3306. # RWKV6QWEN2 use grouped key/value like GQA
  3307. self.gguf_writer.add_head_count_kv(num_key_value_heads)
  3308. # required by llama.cpp, unused
  3309. self.gguf_writer.add_head_count(0)
  3310. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  3311. for new_name, data in super().modify_tensors(data_torch, name, bid):
  3312. if "time_mix_w1" in new_name or "time_mix_w2" in new_name:
  3313. data = data.view(5, -1, data.shape[-1])
  3314. # rwkv6qwen2 has a different order of rkvwg instead of the original wkvrg
  3315. # permute them here to avoid code changes
  3316. data = torch.stack([data[3], data[1], data[2], data[0], data[4]], dim=0).view(-1, data.shape[-1])
  3317. if "w2" in new_name:
  3318. data = data.view(5, -1, data.shape[-1])
  3319. yield (new_name, data)
  3320. continue
  3321. yield (new_name, data)
  3322. @ModelBase.register("Rwkv7ForCausalLM", "RWKV7ForCausalLM")
  3323. class Rwkv7Model(TextModel):
  3324. model_arch = gguf.MODEL_ARCH.RWKV7
  3325. def set_vocab(self):
  3326. self._set_vocab_rwkv_world()
  3327. def calc_lora_rank(self, hidden_size, exponent, multiplier):
  3328. return max(1, round(hidden_size ** exponent * multiplier / 32)) * 32
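
    # e.g. calc_lora_rank(2048, 0.5, 1.8): 2048**0.5 ≈ 45.25, * 1.8 ≈ 81.46, / 32 ≈ 2.55,
    # round -> 3, * 32 -> 96; the result is always a positive multiple of 32.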

    def set_gguf_parameters(self):
        block_count = self.hparams["num_hidden_layers"]
        try:
            head_size = self.hparams["head_size"]
            layer_norm_eps = self.hparams["layer_norm_epsilon"]
        except KeyError:
            head_size = self.hparams["head_dim"]
            layer_norm_eps = self.hparams["norm_eps"]
        hidden_size = self.hparams["hidden_size"]
        intermediate_size = self.hparams["intermediate_size"] if self.hparams["intermediate_size"] is not None else (hidden_size * 4)

        # ICLR: In-Context-Learning-Rate
        try:
            lora_rank_decay = self.hparams["lora_rank_decay"] if self.hparams["lora_rank_decay"] is not None else self.calc_lora_rank(hidden_size, 0.5, 1.8)
            lora_rank_iclr = self.hparams["lora_rank_iclr"] if self.hparams["lora_rank_iclr"] is not None else self.calc_lora_rank(hidden_size, 0.5, 1.8)
            lora_rank_value_residual_mix = self.hparams["lora_rank_value_residual_mix"] if self.hparams["lora_rank_value_residual_mix"] is not None else self.calc_lora_rank(hidden_size, 0.5, 1.3)
            lora_rank_gate = self.hparams["lora_rank_gate"] if self.hparams["lora_rank_gate"] is not None else self.calc_lora_rank(hidden_size, 0.8, 0.6)
        except KeyError:
            lora_rank_decay = self.hparams["decay_low_rank_dim"] if self.hparams["decay_low_rank_dim"] is not None else self.calc_lora_rank(hidden_size, 0.5, 1.8)
            lora_rank_iclr = self.hparams["a_low_rank_dim"] if self.hparams["a_low_rank_dim"] is not None else self.calc_lora_rank(hidden_size, 0.5, 1.8)
            lora_rank_value_residual_mix = self.hparams["v_low_rank_dim"] if self.hparams["v_low_rank_dim"] is not None else self.calc_lora_rank(hidden_size, 0.5, 1.3)
            lora_rank_gate = self.hparams["gate_low_rank_dim"] if self.hparams["gate_low_rank_dim"] is not None else self.calc_lora_rank(hidden_size, 0.8, 0.6)

        # RWKV isn't context limited
        self.gguf_writer.add_context_length(1048576)
        self.gguf_writer.add_embedding_length(hidden_size)
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_layer_norm_eps(layer_norm_eps)
        self.gguf_writer.add_wkv_head_size(head_size)
        self.gguf_writer.add_decay_lora_rank(lora_rank_decay)
        self.gguf_writer.add_iclr_lora_rank(lora_rank_iclr)
        self.gguf_writer.add_value_residual_mix_lora_rank(lora_rank_value_residual_mix)
        self.gguf_writer.add_gate_lora_rank(lora_rank_gate)
        self.gguf_writer.add_feed_forward_length(intermediate_size)
        self.gguf_writer.add_file_type(self.ftype)

        # required by llama.cpp, unused
        self.gguf_writer.add_head_count(0)

    lerp_weights: dict[int, dict[str, Tensor]] = {}
    lora_needs_transpose: bool = True

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # unify tensor names here to make life easier
        name = name.replace("blocks", "layers").replace("ffn", "feed_forward")
        name = name.replace("self_attn", "attention").replace("attn", "attention")
        name = name.replace("time_mixer.", "")
        # lora layer names in fla-hub's impl
        if "_lora.lora" in name:
            self.lora_needs_transpose = False
            name = name.replace("_lora.lora.0.weight", "1.weight")
            name = name.replace("_lora.lora.2.weight", "2.weight")
            name = name.replace("_lora.lora.2.bias", "0.weight")

        name = name.replace("feed_forward_norm", "ln2")
        name = name.replace("g_norm", "ln_x")

        if "attention.v" in name and "value" not in self.map_tensor_name(name) and bid == 0:
            # some models have dummy v0/v1/v2 on first layer while others don't
            # ignore them all since they are not used
            return

        wkv_has_gate = self.hparams.get("wkv_has_gate", True)
        lerp_list = ["r", "w", "k", "v", "a", "g"] if wkv_has_gate else ["r", "w", "k", "v", "a"]

        if bid is not None and "attention.x_" in name:
            if "attention.x_x" in name:
                # already concatenated
                new_name = f"blk.{bid}.time_mix_lerp_fused.weight"
                data = data_torch.reshape(len(lerp_list), 1, 1, -1)
                yield (new_name, data)
            else:
                try:
                    self.lerp_weights[bid][name] = data_torch
                except KeyError:
                    self.lerp_weights[bid] = {name: data_torch}
                if all(f"model.layers.{bid}.attention.x_{i}" in self.lerp_weights[bid].keys() for i in lerp_list):
                    new_name = f"blk.{bid}.time_mix_lerp_fused.weight"
                    data = torch.stack([self.lerp_weights[bid][f"model.layers.{bid}.attention.x_{i}"] for i in lerp_list], dim=0)
                    yield (new_name, data)
            return
        else:
            data_torch = data_torch.squeeze()
            new_name = self.map_tensor_name(name)

            if not (new_name.endswith(".weight") or new_name.endswith(".bias")):
                new_name += ".weight"

            if self.lora_needs_transpose and any(
                new_name.endswith(t) for t in [
                    "time_mix_w1.weight", "time_mix_w2.weight",
                    "time_mix_a1.weight", "time_mix_a2.weight",
                    "time_mix_v1.weight", "time_mix_v2.weight",
                    "time_mix_g1.weight", "time_mix_g2.weight",
                ]
            ):
                data_torch = data_torch.transpose(0, 1)

            if 'r_k' in new_name:
                data_torch = data_torch.flatten()

            if bid == 0 and "time_mix_a" in new_name:
                # dummy v0/v1/v2 on first layer
                # easiest way to keep llama.cpp happy
                yield (new_name.replace("time_mix_a", "time_mix_v"), data_torch)

            yield (new_name, data_torch)
  3422. @ModelBase.register("RwkvHybridForCausalLM")
  3423. class ARwkv7Model(Rwkv7Model):
  3424. model_arch = gguf.MODEL_ARCH.ARWKV7
  3425. def set_vocab(self):
  3426. try:
  3427. self._set_vocab_sentencepiece()
  3428. except FileNotFoundError:
  3429. self._set_vocab_gpt2()
  3430. def set_gguf_parameters(self):
  3431. block_count = self.hparams["num_hidden_layers"]
  3432. hidden_size = self.hparams["hidden_size"]
  3433. head_size = self.hparams["head_size"]
  3434. rms_norm_eps = self.hparams["rms_norm_eps"]
  3435. intermediate_size = self.hparams["intermediate_size"]
  3436. wkv_has_gate = self.hparams["wkv_has_gate"]
  3437. assert self.hparams["wkv_version"] == 7
  3438. # ICLR: In-Context-Learning-Rate
  3439. lora_rank_decay = 64
  3440. lora_rank_iclr = 64
  3441. lora_rank_value_residual_mix = 32
  3442. lora_rank_gate = 128 if wkv_has_gate else 0
  3443. # RWKV isn't context limited
  3444. self.gguf_writer.add_context_length(1048576)
  3445. self.gguf_writer.add_embedding_length(hidden_size)
  3446. self.gguf_writer.add_block_count(block_count)
  3447. self.gguf_writer.add_layer_norm_rms_eps(rms_norm_eps)
  3448. self.gguf_writer.add_wkv_head_size(head_size)
  3449. self.gguf_writer.add_decay_lora_rank(lora_rank_decay)
  3450. self.gguf_writer.add_iclr_lora_rank(lora_rank_iclr)
  3451. self.gguf_writer.add_value_residual_mix_lora_rank(lora_rank_value_residual_mix)
  3452. self.gguf_writer.add_gate_lora_rank(lora_rank_gate)
  3453. self.gguf_writer.add_feed_forward_length(intermediate_size)
  3454. self.gguf_writer.add_file_type(self.ftype)
  3455. self.gguf_writer.add_token_shift_count(1)
  3456. # required by llama.cpp, unused
  3457. self.gguf_writer.add_head_count(0)
  3458. @ModelBase.register("MambaForCausalLM", "MambaLMHeadModel", "FalconMambaForCausalLM")
  3459. class MambaModel(TextModel):
  3460. model_arch = gguf.MODEL_ARCH.MAMBA
  3461. def set_vocab(self):
  3462. vocab_size = self.hparams["vocab_size"]
  3463. # Round vocab size to next multiple of 8
  3464. pad_vocab = self.hparams.get("pad_vocab_size_multiple", 8)
  3465. # pad using ceiling division
  3466. # ref: https://stackoverflow.com/a/17511341/22827863
  3467. vocab_size = -(vocab_size // -pad_vocab) * pad_vocab
  3468. self.hparams["vocab_size"] = vocab_size
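        # e.g. vocab_size=50277, pad_vocab=8: -(50277 // -8) = ceil(50277 / 8) = 6285,
        # so the padded vocab size is 6285 * 8 = 50280.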
  3469. if (self.dir_model / "tokenizer.json").is_file():
  3470. self._set_vocab_gpt2()
  3471. elif (self.dir_model / "tokenizer.model").is_file():
  3472. self._set_vocab_sentencepiece()
  3473. else:
  3474. # Use the GPT-NeoX tokenizer when no tokenizer files are present
  3475. self._set_vocab_builtin("gpt-neox", vocab_size)
  3476. def set_gguf_parameters(self):
  3477. d_model = self.find_hparam(["hidden_size", "d_model"])
  3478. d_conv = self.find_hparam(["conv_kernel", "d_conv"], optional=True) or 4
  3479. d_inner = self.find_hparam(["intermediate_size", "d_inner"], optional=True) or 2 * d_model
  3480. d_state = self.find_hparam(["state_size", "d_state"], optional=True) or 16
  3481. # ceiling division
  3482. # ref: https://stackoverflow.com/a/17511341/22827863
  3483. # ref: https://github.com/state-spaces/mamba/blob/ce59daea3a090d011d6476c6e5b97f6d58ddad8b/mamba_ssm/modules/mamba_simple.py#L58
  3484. dt_rank = self.find_hparam(["time_step_rank", "dt_rank"], optional=True) or -(d_model // -16)
  3485. rms_norm_eps = self.find_hparam(["layer_norm_epsilon", "rms_norm_eps"], optional=True) or 1e-5
  3486. use_dt_b_c_norm = False
  3487. # For falconmamba we do apply RMS norm on B / DT and C layers
  3488. if self.find_hparam(["model_type"], optional=True) in ("falcon_mamba",):
  3489. use_dt_b_c_norm = True
  3490. # Fail early for models which don't have a block expansion factor of 2
  3491. assert d_inner == 2 * d_model
  3492. self.gguf_writer.add_context_length(2**20) # arbitrary value; for those who use the default
  3493. self.gguf_writer.add_embedding_length(d_model)
  3494. self.gguf_writer.add_feed_forward_length(0) # unused, but seemingly required when loading
  3495. self.gguf_writer.add_head_count(0) # unused, but seemingly required when loading
  3496. self.gguf_writer.add_block_count(self.block_count)
  3497. self.gguf_writer.add_ssm_conv_kernel(d_conv)
  3498. self.gguf_writer.add_ssm_inner_size(d_inner)
  3499. self.gguf_writer.add_ssm_state_size(d_state)
  3500. self.gguf_writer.add_ssm_time_step_rank(dt_rank)
  3501. self.gguf_writer.add_layer_norm_rms_eps(rms_norm_eps)
  3502. self.gguf_writer.add_ssm_dt_b_c_rms(use_dt_b_c_norm) # For classic Mamba we don't apply rms norm on B / DT layers
  3503. self.gguf_writer.add_file_type(self.ftype)

    _tok_embd = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        output_name = self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT)
        tok_embd_name = self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD)

        new_name = self.map_tensor_name(name)

        if name.endswith(".A_log"):
            logger.debug("A_log --> A ==> " + new_name)
            data_torch = -torch.exp(data_torch)

        # [4 1 8192 1] -> [4 8192 1 1]
        if self.match_model_tensor_name(new_name, gguf.MODEL_TENSOR.SSM_CONV1D, bid):
            data_torch = data_torch.squeeze()

        # assuming token_embd.weight is seen before output.weight
        if self._tok_embd is not None and new_name == output_name:
            if torch.equal(self._tok_embd, data_torch):
                logger.debug(f"{output_name} is equivalent to {tok_embd_name}, omitting")
                return []
        elif new_name == tok_embd_name:
            self._tok_embd = data_torch

        return [(new_name, data_torch)]
  3523. @ModelBase.register("CohereForCausalLM")
  3524. class CommandR2Model(TextModel):
  3525. model_arch = gguf.MODEL_ARCH.COMMAND_R
  3526. def __init__(self, *args, **kwargs):
  3527. super().__init__(*args, **kwargs)
  3528. # max_position_embeddings = 8192 in config.json but model was actually
  3529. # trained on 128k context length
  3530. # aya-23 models don't have model_max_length specified
  3531. self.hparams["max_position_embeddings"] = self.find_hparam(["model_max_length", "max_position_embeddings"])
  3532. def set_gguf_parameters(self):
  3533. super().set_gguf_parameters()
  3534. self.gguf_writer.add_logit_scale(self.hparams["logit_scale"])
  3535. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)
  3536. @ModelBase.register("Cohere2ForCausalLM")
  3537. class Cohere2Model(TextModel):
  3538. model_arch = gguf.MODEL_ARCH.COHERE2
  3539. def set_gguf_parameters(self):
  3540. super().set_gguf_parameters()
  3541. self.gguf_writer.add_logit_scale(self.hparams["logit_scale"])
  3542. self.gguf_writer.add_sliding_window(self.hparams["sliding_window"])
  3543. self.gguf_writer.add_vocab_size(self.hparams["vocab_size"])
  3544. rotary_pct = self.hparams["rotary_pct"]
  3545. hidden_size = self.hparams["hidden_size"]
  3546. num_attention_heads = self.hparams["num_attention_heads"]
  3547. self.gguf_writer.add_rope_dimension_count(int(rotary_pct * (hidden_size // num_attention_heads)))
  3548. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)
  3549. @ModelBase.register("OlmoForCausalLM")
  3550. @ModelBase.register("OLMoForCausalLM")
  3551. class OlmoModel(TextModel):
  3552. model_arch = gguf.MODEL_ARCH.OLMO
  3553. def set_gguf_parameters(self):
  3554. super().set_gguf_parameters()
  3555. self.gguf_writer.add_layer_norm_eps(1e-5)
  3556. clip_qkv = self.hparams.get("clip_qkv")
  3557. if clip_qkv is not None:
  3558. self.gguf_writer.add_clamp_kqv(clip_qkv)
  3559. # Same as super class, but permuting q_proj, k_proj
  3560. # Copied from: LlamaModel
  3561. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  3562. del bid # unused
  3563. n_head = self.hparams["num_attention_heads"]
  3564. n_kv_head = self.hparams.get("num_key_value_heads")
  3565. if name.endswith("q_proj.weight"):
  3566. data_torch = LlamaModel.permute(data_torch, n_head, n_head)
  3567. if name.endswith("k_proj.weight"):
  3568. data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)
  3569. return [(self.map_tensor_name(name), data_torch)]
  3570. @ModelBase.register("Olmo2ForCausalLM")
  3571. class Olmo2Model(TextModel):
  3572. model_arch = gguf.MODEL_ARCH.OLMO2
  3573. @ModelBase.register("OlmoeForCausalLM")
  3574. class OlmoeModel(TextModel):
  3575. model_arch = gguf.MODEL_ARCH.OLMOE
  3576. def set_gguf_parameters(self):
  3577. super().set_gguf_parameters()
  3578. self.gguf_writer.add_layer_norm_rms_eps(1e-5)
  3579. if (n_experts := self.hparams.get("num_experts")) is not None:
  3580. self.gguf_writer.add_expert_count(n_experts)
  3581. _experts: list[dict[str, Tensor]] | None = None
  3582. # Copied from: Qwen2MoeModel
  3583. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  3584. # process the experts separately
  3585. if name.find("experts") != -1:
  3586. n_experts = self.hparams["num_experts"]
  3587. assert bid is not None
  3588. if self._experts is None:
  3589. self._experts = [{} for _ in range(self.block_count)]
  3590. self._experts[bid][name] = data_torch
  3591. if len(self._experts[bid]) >= n_experts * 3:
  3592. tensors: list[tuple[str, Tensor]] = []
  3593. # merge the experts into a single 3d tensor
  3594. for w_name in ["down_proj", "gate_proj", "up_proj"]:
  3595. datas: list[Tensor] = []
  3596. for xid in range(n_experts):
  3597. ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
  3598. datas.append(self._experts[bid][ename])
  3599. del self._experts[bid][ename]
  3600. data_torch = torch.stack(datas, dim=0)
  3601. merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"
  3602. new_name = self.map_tensor_name(merged_name)
  3603. tensors.append((new_name, data_torch))
  3604. return tensors
  3605. else:
  3606. return []
  3607. return [(self.map_tensor_name(name), data_torch)]
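
    # Shape sketch for the merge above (hypothetical dims, for illustration only): with
    # n_experts=8 and per-expert gate_proj weights of shape (ffn, hidden), torch.stack
    # produces one (8, ffn, hidden) tensor per projection, i.e. 3 merged tensors per layer.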

    # Copied from: Qwen2MoeModel
    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")


@ModelBase.register("JinaBertModel", "JinaBertForMaskedLM")
class JinaBertV2Model(BertModel):
    model_arch = gguf.MODEL_ARCH.JINA_BERT_V2

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.intermediate_size = self.hparams["intermediate_size"]

    def get_tensors(self):
        for name, data in super().get_tensors():
            if 'gated_layer' in name:
                d1 = data[:self.intermediate_size, :]
                name1 = name.replace('gated_layers', 'gated_layers_w')
                name1 = name1.replace('up_gated_layer', 'gated_layers_v')
                d2 = data[self.intermediate_size:, :]
                name2 = name.replace('gated_layers', 'gated_layers_v')
                name2 = name2.replace('up_gated_layer', 'gated_layers_w')
                yield name1, d1
                yield name2, d2
                continue

            yield name, data
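
    # The fused 'gated_layer' weight stacks both halves of the gated MLP along dim 0: a tensor
    # of shape (2 * intermediate_size, hidden) splits into two (intermediate_size, hidden)
    # halves, named w/v ('gated_layers') or v/w ('up_gated_layer', whose halves are stored in
    # the opposite order).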

    def set_vocab(self):
        tokenizer_class = 'BertTokenizer'
        with open(self.dir_model / "tokenizer_config.json", "r", encoding="utf-8") as f:
            tokenizer_class = json.load(f)['tokenizer_class']

        if tokenizer_class == 'BertTokenizer':
            super().set_vocab()
        elif tokenizer_class == 'RobertaTokenizer':
            self._set_vocab_gpt2()
            self.gguf_writer.add_token_type_count(2)
        else:
            raise NotImplementedError(f'Tokenizer {tokenizer_class} is not supported for JinaBertModel')
        self.gguf_writer.add_add_bos_token(True)
        self.gguf_writer.add_add_eos_token(True)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # if name starts with "bert.", remove the prefix
        # e.g. https://huggingface.co/jinaai/jina-reranker-v1-tiny-en
        if name.startswith("bert."):
            name = name[5:]

        return super().modify_tensors(data_torch, name, bid)
  3654. @ModelBase.register("OpenELMForCausalLM")
  3655. class OpenELMModel(TextModel):
  3656. model_arch = gguf.MODEL_ARCH.OPENELM
  3657. @staticmethod
  3658. def _make_divisible(v: float | int, divisor: int) -> int:
  3659. # ref: https://huggingface.co/apple/OpenELM-270M-Instruct/blob/eb111ff2e6724348e5b905984063d4064d4bc579/configuration_openelm.py#L34-L38
  3660. new_v = max(divisor, int(v + divisor / 2) // divisor * divisor)
  3661. # Make sure that round down does not go down by more than 10%.
  3662. if new_v < 0.9 * v:
  3663. new_v += divisor
  3664. return new_v
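
    # e.g. _make_divisible(640, 256): int(640 + 128) // 256 * 256 = 768, max(256, 768) = 768;
    # 768 >= 0.9 * 640, so no correction step is needed and the result is 768.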

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        ffn_multipliers: list[float] = self.hparams["ffn_multipliers"]
        ffn_dim_divisor: int = self.hparams["ffn_dim_divisor"]
        self._n_embd: int = self.hparams["model_dim"]
        self._num_kv_heads: list[int] = self.hparams["num_kv_heads"]
        self._num_query_heads: list[int] = self.hparams["num_query_heads"]
        self._ffn_dims: list[int] = [
            OpenELMModel._make_divisible(multiplier * self._n_embd, ffn_dim_divisor)
            for multiplier in ffn_multipliers
        ]
        assert isinstance(self._num_kv_heads, list) and isinstance(self._num_kv_heads[0], int)
        assert isinstance(self._num_query_heads, list) and isinstance(self._num_query_heads[0], int)

    # Uses the tokenizer from meta-llama/Llama-2-7b-hf
    def set_vocab(self):
        try:
            self._set_vocab_sentencepiece()
        except FileNotFoundError:
            self._set_vocab_builtin("llama-spm", self.hparams["vocab_size"])

    def set_gguf_parameters(self):
        n_embd = self._n_embd
        head_dim = self.hparams["head_dim"]
        rot_pct = 1.0
        assert self.block_count == len(self._num_kv_heads)
        assert self.block_count == len(self._num_query_heads)
        assert self.block_count == len(self._ffn_dims)

        self.gguf_writer.add_block_count(self.block_count)
        self.gguf_writer.add_context_length(self.hparams["max_context_length"])
        self.gguf_writer.add_embedding_length(n_embd)
        self.gguf_writer.add_feed_forward_length(self._ffn_dims)
        self.gguf_writer.add_head_count(self._num_query_heads)
        self.gguf_writer.add_head_count_kv(self._num_kv_heads)
        self.gguf_writer.add_rope_freq_base(self.hparams["rope_freq_constant"])
        # https://huggingface.co/apple/OpenELM-270M-Instruct/blob/c401df2/modeling_openelm.py#L30
        self.gguf_writer.add_layer_norm_rms_eps(1e-6)
        self.gguf_writer.add_rope_dimension_count(int(rot_pct * head_dim))
        self.gguf_writer.add_key_length(head_dim)
        self.gguf_writer.add_value_length(head_dim)
        self.gguf_writer.add_file_type(self.ftype)

    def find_hparam(self, keys: Iterable[str], optional: bool = False) -> Any:
        if "n_layers" in keys:
            return self.hparams["num_transformer_layers"]
        return super().find_hparam(keys, optional)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # split ff
        if bid is not None and name == f"transformer.layers.{bid}.ffn.proj_1.weight":
            ff_dim = self._ffn_dims[bid]
            yield (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE, bid), data_torch[:ff_dim])
            yield (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP, bid), data_torch[ff_dim:])
            return

        yield (self.map_tensor_name(name), data_torch)


@ModelBase.register("ArcticForCausalLM")
class ArcticModel(TextModel):
    model_arch = gguf.MODEL_ARCH.ARCTIC

    def set_vocab(self):
        # The reason for using a custom implementation here is that the
        # snowflake-arctic-instruct model redefined tokens 31998 and 31999 from
        # tokenizer.model and used them as BOS and EOS instead of adding new tokens.
        from sentencepiece import SentencePieceProcessor

        tokenizer_path = self.dir_model / 'tokenizer.model'

        if not tokenizer_path.is_file():
            logger.error(f'Error: Missing {tokenizer_path}')
            sys.exit(1)

        # Read the whole vocabulary from the tokenizer.model file
        tokenizer = SentencePieceProcessor()
        tokenizer.LoadFromFile(str(tokenizer_path))

        vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())

        tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
        scores: list[float] = [-10000.0] * vocab_size
        toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size

        for token_id in range(tokenizer.vocab_size()):
            piece = tokenizer.IdToPiece(token_id)
            text = piece.encode("utf-8")
            score = tokenizer.GetScore(token_id)

            toktype = SentencePieceTokenTypes.NORMAL
            if tokenizer.IsUnknown(token_id):
                toktype = SentencePieceTokenTypes.UNKNOWN
            elif tokenizer.IsControl(token_id):
                toktype = SentencePieceTokenTypes.CONTROL
            elif tokenizer.IsUnused(token_id):
                toktype = SentencePieceTokenTypes.UNUSED
            elif tokenizer.IsByte(token_id):
                toktype = SentencePieceTokenTypes.BYTE

            tokens[token_id] = text
            scores[token_id] = score
            toktypes[token_id] = toktype

        # Use the added_tokens_decoder field from tokenizer_config.json as the source
        # of information about added/redefined tokens and modify them accordingly.
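        # Entries in that field look roughly like this (illustrative shape only,
        # not actual Arctic content):
        #   "31998": {"content": "<SPECIAL_TOKEN>", "special": true, ...}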
        tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
        if tokenizer_config_file.is_file():
            with open(tokenizer_config_file, "r", encoding="utf-8") as f:
                tokenizer_config_json = json.load(f)

                if "added_tokens_decoder" in tokenizer_config_json:
                    added_tokens_decoder = tokenizer_config_json["added_tokens_decoder"]
                    for token_id, token_json in added_tokens_decoder.items():
                        token_id = int(token_id)
                        if token_id >= vocab_size:
                            logger.debug(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
                            continue

                        token_content = token_json["content"]
                        token_type = SentencePieceTokenTypes.USER_DEFINED
                        token_score = -10000.0

                        # Map unk_token to UNKNOWN, other special tokens to CONTROL
                        # Set the score to 0.0 as in the original tokenizer.model
                        if ("special" in token_json) and token_json["special"]:
                            if token_content == tokenizer_config_json["unk_token"]:
                                token_type = SentencePieceTokenTypes.UNKNOWN
                            else:
                                token_type = SentencePieceTokenTypes.CONTROL
                                token_score = 0.0

                        logger.info(f"Setting added token {token_id} to '{token_content}' (type: {token_type}, score: {token_score:.2f})")
                        tokens[token_id] = token_content.encode("utf-8")
                        toktypes[token_id] = token_type
                        scores[token_id] = token_score

        self.gguf_writer.add_tokenizer_model("llama")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        hparams = self.hparams
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])
        self.gguf_writer.add_rope_dimension_count(hparams["hidden_size"] // hparams["num_attention_heads"])

    _experts: list[dict[str, Tensor]] | None = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        n_head = self.hparams["num_attention_heads"]
        n_kv_head = self.hparams.get("num_key_value_heads")

        if name.endswith("q_proj.weight"):
            data_torch = LlamaModel.permute(data_torch, n_head, n_head)
        if name.endswith("k_proj.weight"):
            data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)

        # process the experts separately
        if name.find("block_sparse_moe.experts") != -1:
            n_experts = self.hparams["num_local_experts"]

            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for wid in ["w1", "w2", "w3"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.{wid}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)

                    merged_name = f"layers.{bid}.feed_forward.experts.{wid}.weight"

                    new_name = self.map_tensor_name(merged_name)

                    tensors.append((new_name, data_torch))
                return tensors
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")


@ModelBase.register("DeepseekForCausalLM")
class DeepseekModel(TextModel):
    model_arch = gguf.MODEL_ARCH.DEEPSEEK

    def set_vocab(self):
        try:
            self._set_vocab_sentencepiece()
        except FileNotFoundError:
            self._set_vocab_gpt2()

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        hparams = self.hparams
        if "head_dim" in hparams:
            rope_dim = hparams["head_dim"]
        else:
            rope_dim = hparams["hidden_size"] // hparams["num_attention_heads"]

        self.gguf_writer.add_rope_dimension_count(rope_dim)
        self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)
        self.gguf_writer.add_leading_dense_block_count(hparams["first_k_dense_replace"])
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])
        self.gguf_writer.add_expert_feed_forward_length(hparams["moe_intermediate_size"])
        self.gguf_writer.add_expert_weights_scale(1.0)
        self.gguf_writer.add_expert_count(hparams["n_routed_experts"])
        self.gguf_writer.add_expert_shared_count(hparams["n_shared_experts"])

    _experts: list[dict[str, Tensor]] | None = None

    @staticmethod
    def permute(weights: Tensor, n_head: int, n_head_kv: int | None):
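        # note: HF checkpoints store q/k projection rows per head as
        # [first rotary half | second rotary half]; this reshape/swapaxes
        # reorders them into the interleaved pair layout expected on the
        # llama.cpp side, without changing any values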
        if n_head_kv is not None and n_head != n_head_kv:
            n_head = n_head_kv
        return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
                .swapaxes(1, 2)
                .reshape(weights.shape))

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        n_head = self.hparams["num_attention_heads"]
        n_kv_head = self.hparams.get("num_key_value_heads")

        if name.endswith(("q_proj.weight", "q_proj.bias")):
            data_torch = DeepseekModel.permute(data_torch, n_head, n_head)
        if name.endswith(("k_proj.weight", "k_proj.bias")):
            data_torch = DeepseekModel.permute(data_torch, n_head, n_kv_head)

        # process the experts separately
        if name.find("mlp.experts") != -1:
            n_experts = self.hparams["n_routed_experts"]
            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for w_name in ["down_proj", "gate_proj", "up_proj"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)

                    merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"

                    new_name = self.map_tensor_name(merged_name)

                    tensors.append((new_name, data_torch))
                return tensors
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")


@ModelBase.register("DeepseekV2ForCausalLM")
@ModelBase.register("DeepseekV3ForCausalLM")
class DeepseekV2Model(TextModel):
    model_arch = gguf.MODEL_ARCH.DEEPSEEK2

    def set_vocab(self):
        self._set_vocab_gpt2()

    def set_gguf_parameters(self):
        # note: deepseek2 using MLA converts into MQA (ie: GQA with 1 group)
        self.hparams["num_key_value_heads"] = 1

        super().set_gguf_parameters()
        hparams = self.hparams

        self.gguf_writer.add_leading_dense_block_count(hparams["first_k_dense_replace"])
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])
        if "q_lora_rank" in hparams and hparams["q_lora_rank"] is not None:
            self.gguf_writer.add_q_lora_rank(hparams["q_lora_rank"])
        self.gguf_writer.add_kv_lora_rank(hparams["kv_lora_rank"])

        # note: deepseek2 using MLA converts into MQA with larger heads, then decompresses to MHA
        self.gguf_writer.add_key_length(hparams["kv_lora_rank"] + hparams["qk_rope_head_dim"])
        self.gguf_writer.add_value_length(hparams["kv_lora_rank"])
        self.gguf_writer.add_key_length_mla(hparams["qk_nope_head_dim"] + hparams["qk_rope_head_dim"])
        self.gguf_writer.add_value_length_mla(hparams["v_head_dim"])

        self.gguf_writer.add_expert_feed_forward_length(hparams["moe_intermediate_size"])
        self.gguf_writer.add_expert_count(hparams["n_routed_experts"])
        self.gguf_writer.add_expert_shared_count(hparams["n_shared_experts"])
        self.gguf_writer.add_expert_weights_scale(hparams["routed_scaling_factor"])
        self.gguf_writer.add_expert_weights_norm(hparams["norm_topk_prob"])

        if hparams["scoring_func"] == "sigmoid":
            self.gguf_writer.add_expert_gating_func(gguf.ExpertGatingFuncType.SIGMOID)
        elif hparams["scoring_func"] == "softmax":
            self.gguf_writer.add_expert_gating_func(gguf.ExpertGatingFuncType.SOFTMAX)
        else:
            raise ValueError(f"Unsupported scoring_func value: {hparams['scoring_func']}")

        self.gguf_writer.add_rope_dimension_count(hparams["qk_rope_head_dim"])

        if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]:
            if self.hparams["rope_scaling"].get("type") == "yarn":
                self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
                self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"])
                self.gguf_writer.add_rope_scaling_orig_ctx_len(self.hparams["rope_scaling"]["original_max_position_embeddings"])
                self.gguf_writer.add_rope_scaling_yarn_log_mul(0.1 * hparams["rope_scaling"]["mscale_all_dim"])
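                # the 0.1 factor reflects DeepSeek's yarn attention-scale formula,
                # mscale = 0.1 * mscale_all_dim * log(factor) + 1.0, from the
                # reference modeling code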

    _experts: list[dict[str, Tensor]] | None = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # rename e_score_correction_bias tensors
        if name.endswith("e_score_correction_bias"):
            name = name.replace("e_score_correction_bias", "e_score_correction.bias")

        # skip Multi-Token Prediction (MTP) layers
        block_count = self.hparams["num_hidden_layers"]
        match = re.match(r"model.layers.(\d+)", name)
        if match and int(match.group(1)) >= block_count:
            return []

        # process the experts separately
        if name.find("mlp.experts") != -1:
            n_experts = self.hparams["n_routed_experts"]
            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for w_name in ["down_proj", "gate_proj", "up_proj"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)

                    merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"

                    new_name = self.map_tensor_name(merged_name)

                    tensors.append((new_name, data_torch))
                return tensors
            else:
                return []

        # note: MLA with the absorption optimization, needs these two split and k_b_proj transposed
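        # shapes, as derived from the view/split below:
        #   kv_b_proj.weight:      (n_head_kv * (qk_nope_head_dim + v_head_dim), kv_lora_rank)
        #   k_b (after transpose): (n_head_kv, kv_lora_rank, qk_nope_head_dim)
        #   v_b:                   (n_head_kv, v_head_dim, kv_lora_rank)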
        if name.endswith("kv_b_proj.weight"):
            name_kb = name.replace("kv_b_proj", "k_b_proj")
            name_vb = name.replace("kv_b_proj", "v_b_proj")

            n_head_kv = self.hparams["num_key_value_heads"]
            v_head_dim = self.hparams["v_head_dim"]
            qk_nope_head_dim = self.hparams["qk_nope_head_dim"]

            assert data_torch.shape[0] == n_head_kv * (v_head_dim + qk_nope_head_dim)

            kv_b = data_torch.view(n_head_kv, v_head_dim + qk_nope_head_dim, data_torch.shape[-1])
            k_b, v_b = torch.split(kv_b, [qk_nope_head_dim, v_head_dim], dim=1)
            k_b = k_b.transpose(1, 2)

            return [
                (self.map_tensor_name(name_kb), k_b),
                (self.map_tensor_name(name_vb), v_b)
            ]

        return [(self.map_tensor_name(name), data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")


@ModelBase.register("PLMForCausalLM")
class PLMModel(TextModel):
    model_arch = gguf.MODEL_ARCH.PLM

    def set_vocab(self):
        self._set_vocab_gpt2()

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        hparams = self.hparams
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])
        self.gguf_writer.add_kv_lora_rank(hparams["kv_lora_rank"])
        self.gguf_writer.add_key_length(hparams["qk_nope_head_dim"] + hparams["qk_rope_head_dim"])
        self.gguf_writer.add_value_length(hparams["v_head_dim"])
        self.gguf_writer.add_rope_dimension_count(hparams["qk_rope_head_dim"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        return [(self.map_tensor_name(name), data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()


@ModelBase.register("T5WithLMHeadModel")
@ModelBase.register("T5ForConditionalGeneration")
@ModelBase.register("MT5ForConditionalGeneration")
@ModelBase.register("UMT5ForConditionalGeneration")
class T5Model(TextModel):
    model_arch = gguf.MODEL_ARCH.T5

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.shared_token_embeddings_found = False

    def set_vocab(self):
        # to avoid TypeError: Descriptors cannot be created directly
        # exception when importing sentencepiece_model_pb2
        os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
        from sentencepiece import SentencePieceProcessor
        from sentencepiece import sentencepiece_model_pb2 as model

        tokenizer_path = self.dir_model / 'tokenizer.model'

        # many older models use spiece.model tokenizer model filename
        if not tokenizer_path.is_file():
            tokenizer_path = self.dir_model / 'spiece.model'

        if not tokenizer_path.is_file():
            raise FileNotFoundError(f"File not found: {tokenizer_path}")

        sentencepiece_model = model.ModelProto()  # pyright: ignore[reportAttributeAccessIssue]
        sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read())

        # some models like Pile-T5 family use BPE tokenizer instead of Unigram
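        # (trainer_spec.model_type values follow sentencepiece's ModelProto enum:
        # UNIGRAM = 1, BPE = 2, WORD = 3, CHAR = 4)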
        if sentencepiece_model.trainer_spec.model_type == 2:  # BPE
            # ensure the tokenizer model file name is correct
            assert tokenizer_path.name == 'tokenizer.model'
            return self._set_vocab_sentencepiece()
        else:
            assert sentencepiece_model.trainer_spec.model_type == 1  # UNIGRAM

        add_prefix = sentencepiece_model.normalizer_spec.add_dummy_prefix
        remove_whitespaces = sentencepiece_model.normalizer_spec.remove_extra_whitespaces
        precompiled_charsmap = sentencepiece_model.normalizer_spec.precompiled_charsmap

        tokenizer = SentencePieceProcessor()
        tokenizer.LoadFromFile(str(tokenizer_path))

        vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())

        tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
        scores: list[float] = [-10000.0] * vocab_size
        toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size

        for token_id in range(tokenizer.vocab_size()):
            piece = tokenizer.IdToPiece(token_id)
            text = piece.encode("utf-8")
            score = tokenizer.GetScore(token_id)

            toktype = SentencePieceTokenTypes.NORMAL
            if tokenizer.IsUnknown(token_id):
                toktype = SentencePieceTokenTypes.UNKNOWN
            elif tokenizer.IsControl(token_id):
                toktype = SentencePieceTokenTypes.CONTROL
            elif tokenizer.IsUnused(token_id):
                toktype = SentencePieceTokenTypes.UNUSED
            elif tokenizer.IsByte(token_id):
                toktype = SentencePieceTokenTypes.BYTE

            tokens[token_id] = text
            scores[token_id] = score
            toktypes[token_id] = toktype

        added_tokens_file = self.dir_model / 'added_tokens.json'
        if added_tokens_file.is_file():
            with open(added_tokens_file, "r", encoding="utf-8") as f:
                added_tokens_json = json.load(f)
                for key in added_tokens_json:
                    token_id = added_tokens_json[key]
                    if token_id >= vocab_size:
                        logger.warning(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
                        continue

                    tokens[token_id] = key.encode("utf-8")
                    scores[token_id] = -1000.0
                    toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED

        if vocab_size > len(tokens):
            pad_count = vocab_size - len(tokens)
            logger.debug(f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]")
            for i in range(1, pad_count + 1):
                tokens.append(bytes(f"[PAD{i}]", encoding="utf-8"))
                scores.append(-1000.0)
                toktypes.append(SentencePieceTokenTypes.UNUSED)

        self.gguf_writer.add_tokenizer_model("t5")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)
        self.gguf_writer.add_add_space_prefix(add_prefix)
        self.gguf_writer.add_remove_extra_whitespaces(remove_whitespaces)
        if precompiled_charsmap:
            self.gguf_writer.add_precompiled_charsmap(precompiled_charsmap)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

        self.gguf_writer.add_add_bos_token(False)
        self.gguf_writer.add_add_eos_token(True)

    def set_gguf_parameters(self):
        if (n_ctx := self.find_hparam(["n_positions"], optional=True)) is None:
            logger.warning("Couldn't find context length in config.json, assuming default value of 512")
            n_ctx = 512
        self.gguf_writer.add_context_length(n_ctx)
        self.gguf_writer.add_embedding_length(self.hparams["d_model"])
        self.gguf_writer.add_feed_forward_length(self.hparams["d_ff"])
        self.gguf_writer.add_block_count(self.hparams["num_layers"])
        self.gguf_writer.add_head_count(self.hparams["num_heads"])
        self.gguf_writer.add_key_length(self.hparams["d_kv"])
        self.gguf_writer.add_value_length(self.hparams["d_kv"])
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_relative_attn_buckets_count(self.hparams["relative_attention_num_buckets"])
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_decoder_start_token_id(self.hparams["decoder_start_token_id"])
        self.gguf_writer.add_file_type(self.ftype)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        # T5 based models contain shared token embeddings tensors saved randomly as either "encoder.embed_tokens.weight",
        # "decoder.embed_tokens.weight" or "shared.weight" tensor. In some models there are even multiple of them stored
        # in the safetensors files. We use the first tensor from these three as the token embeddings for both encoder
        # and decoder and ignore the remaining ones.
        if name in ["decoder.embed_tokens.weight", "encoder.embed_tokens.weight", "shared.weight"]:
            if not self.shared_token_embeddings_found:
                name = "shared.weight"
                self.shared_token_embeddings_found = True
            else:
                logger.debug(f"Skipping shared tensor {name!r} in safetensors so that convert can end normally.")
                return []

        return [(self.map_tensor_name(name), data_torch)]


@ModelBase.register("T5EncoderModel")
class T5EncoderModel(TextModel):
    model_arch = gguf.MODEL_ARCH.T5ENCODER

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.shared_token_embeddings_found = False

    def set_vocab(self):
        # to avoid TypeError: Descriptors cannot be created directly
        # exception when importing sentencepiece_model_pb2
        os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
        from sentencepiece import SentencePieceProcessor
        from sentencepiece import sentencepiece_model_pb2 as model

        tokenizer_path = self.dir_model / 'tokenizer.model'

        # many older models use spiece.model tokenizer model filename
        if not tokenizer_path.is_file():
            tokenizer_path = self.dir_model / 'spiece.model'

        if not tokenizer_path.is_file():
            raise FileNotFoundError(f"File not found: {tokenizer_path}")

        sentencepiece_model = model.ModelProto()  # pyright: ignore[reportAttributeAccessIssue]
        sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read())

        # some models like Pile-T5 family use BPE tokenizer instead of Unigram
        if sentencepiece_model.trainer_spec.model_type == 2:  # BPE
            # ensure the tokenizer model file name is correct
            assert tokenizer_path.name == 'tokenizer.model'
            return self._set_vocab_sentencepiece()
        else:
            assert sentencepiece_model.trainer_spec.model_type == 1  # UNIGRAM

        add_prefix = sentencepiece_model.normalizer_spec.add_dummy_prefix
        remove_whitespaces = sentencepiece_model.normalizer_spec.remove_extra_whitespaces
        precompiled_charsmap = sentencepiece_model.normalizer_spec.precompiled_charsmap

        tokenizer = SentencePieceProcessor()
        tokenizer.LoadFromFile(str(tokenizer_path))

        vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())

        tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
        scores: list[float] = [-10000.0] * vocab_size
        toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size

        for token_id in range(tokenizer.vocab_size()):
            piece = tokenizer.IdToPiece(token_id)
            text = piece.encode("utf-8")
            score = tokenizer.GetScore(token_id)

            toktype = SentencePieceTokenTypes.NORMAL
            if tokenizer.IsUnknown(token_id):
                toktype = SentencePieceTokenTypes.UNKNOWN
            elif tokenizer.IsControl(token_id):
                toktype = SentencePieceTokenTypes.CONTROL
            elif tokenizer.IsUnused(token_id):
                toktype = SentencePieceTokenTypes.UNUSED
            elif tokenizer.IsByte(token_id):
                toktype = SentencePieceTokenTypes.BYTE

            tokens[token_id] = text
            scores[token_id] = score
            toktypes[token_id] = toktype

        added_tokens_file = self.dir_model / 'added_tokens.json'
        if added_tokens_file.is_file():
            with open(added_tokens_file, "r", encoding="utf-8") as f:
                added_tokens_json = json.load(f)
                for key in added_tokens_json:
                    token_id = added_tokens_json[key]
                    if token_id >= vocab_size:
                        logger.warning(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
                        continue

                    tokens[token_id] = key.encode("utf-8")
                    scores[token_id] = -1000.0
                    toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED

        if vocab_size > len(tokens):
            pad_count = vocab_size - len(tokens)
            logger.debug(f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]")
            for i in range(1, pad_count + 1):
                tokens.append(bytes(f"[PAD{i}]", encoding="utf-8"))
                scores.append(-1000.0)
                toktypes.append(SentencePieceTokenTypes.UNUSED)

        self.gguf_writer.add_tokenizer_model("t5")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)
        self.gguf_writer.add_add_space_prefix(add_prefix)
        self.gguf_writer.add_remove_extra_whitespaces(remove_whitespaces)
        if precompiled_charsmap:
            self.gguf_writer.add_precompiled_charsmap(precompiled_charsmap)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

        self.gguf_writer.add_add_bos_token(False)
        self.gguf_writer.add_add_eos_token(True)

    def set_gguf_parameters(self):
        if (n_ctx := self.find_hparam(["n_positions"], optional=True)) is None:
            logger.warning("Couldn't find context length in config.json, assuming default value of 512")
            n_ctx = 512
        self.gguf_writer.add_context_length(n_ctx)
        self.gguf_writer.add_embedding_length(self.hparams["d_model"])
        self.gguf_writer.add_feed_forward_length(self.hparams["d_ff"])
        self.gguf_writer.add_block_count(self.hparams["num_layers"])
        self.gguf_writer.add_head_count(self.hparams["num_heads"])
        self.gguf_writer.add_key_length(self.hparams["d_kv"])
        self.gguf_writer.add_value_length(self.hparams["d_kv"])
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_relative_attn_buckets_count(self.hparams["relative_attention_num_buckets"])
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        # T5 based models contain shared token embeddings tensors saved randomly as either "encoder.embed_tokens.weight",
        # "decoder.embed_tokens.weight" or "shared.weight" tensor. In some models there are even multiple of them stored
        # in the safetensors files. We use the first tensor from these three as the token embeddings for both encoder
        # and decoder and ignore the remaining ones.
        if name in ["decoder.embed_tokens.weight", "encoder.embed_tokens.weight", "shared.weight"]:
            if not self.shared_token_embeddings_found:
                name = "shared.weight"
                self.shared_token_embeddings_found = True
            else:
                logger.debug(f"Skipping shared tensor {name!r} in safetensors so that convert can end normally.")
                return []

        return [(self.map_tensor_name(name), data_torch)]


@ModelBase.register("JAISLMHeadModel")
class JaisModel(TextModel):
    model_arch = gguf.MODEL_ARCH.JAIS

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # SwiGLU activation
        assert self.hparams["activation_function"] == "swiglu"
        # ALiBi position embedding
        assert self.hparams["position_embedding_type"] == "alibi"

        # Embeddings scale
        self.embeddings_scale = 1.0
        if 'mup_embeddings_scale' in self.hparams:
            self.embeddings_scale = self.hparams['mup_embeddings_scale']
        elif 'embeddings_scale' in self.hparams:
            self.embeddings_scale = self.hparams['embeddings_scale']
        else:
            assert False

        self.width_scale = 1.0
        if 'mup_output_alpha' in self.hparams:
            assert 'mup_width_scale' in self.hparams
            self.width_scale = self.hparams['mup_output_alpha'] * self.hparams['mup_width_scale']
        elif 'width_scale' in self.hparams:
            self.width_scale = self.hparams['width_scale']
        else:
            assert False

        self.max_alibi_bias = 8.0

    def set_vocab(self):
        self._set_vocab_gpt2()

    def set_gguf_parameters(self):
        self.gguf_writer.add_block_count(self.hparams["n_layer"])
        self.gguf_writer.add_context_length(self.hparams["n_positions"])
        self.gguf_writer.add_embedding_length(self.hparams["n_embd"])
        self.gguf_writer.add_feed_forward_length(self.hparams["n_inner"])
        self.gguf_writer.add_head_count(self.hparams["n_head"])
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        tensors: list[tuple[str, Tensor]] = []

        # we don't need these
        if name.endswith((".attn.bias")):
            return tensors

        if name.endswith(("relative_pe.slopes")):
            # Calculate max ALiBi bias (this is the inverse of the ALiBi calculation)
            # Some other models have max_alibi_bias spelled out explicitly in the hyperparams,
            # but Jais's PyTorch model simply precalculates the slope values and places them
            # in relative_pe.slopes
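            # with the standard ALiBi parameterization, slopes[0] == 2 ** (-max_alibi_bias / n),
            # where n is n_head rounded down to a power of two, hence:
            #   max_alibi_bias = -log2(slopes[0]) * n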
            n_head_closest_log2 = 2 ** math.floor(math.log2(self.hparams["n_head"]))
            first_val = float(data_torch[0].item())
            self.max_alibi_bias = -round(math.log2(first_val) * n_head_closest_log2)

            return tensors

        if name.endswith((".c_attn.weight", ".c_proj.weight", ".c_fc.weight", ".c_fc2.weight")):
            data_torch = data_torch.transpose(1, 0)

        new_name = self.map_tensor_name(name)

        if new_name == self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD):
            tensors.append((new_name, data_torch * self.embeddings_scale))
        elif new_name == self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT):
            tensors.append((new_name, data_torch * self.width_scale))
        else:
            tensors.append((new_name, data_torch))

        return tensors

    def prepare_tensors(self):
        super().prepare_tensors()
        self.gguf_writer.add_max_alibi_bias(self.max_alibi_bias)


@ModelBase.register("Glm4ForCausalLM")
class Glm4Model(TextModel):
    model_arch = gguf.MODEL_ARCH.GLM4

    def set_vocab(self):
        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(self.dir_model, trust_remote_code=True)
        tokens, toktypes, tokpre = self.get_vocab_base()
        self.gguf_writer.add_tokenizer_model("gpt2")
        self.gguf_writer.add_tokenizer_pre(tokpre)
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)
        special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=True)
        special_vocab._set_special_token("eos", tokenizer.get_added_vocab()["<|endoftext|>"])
        special_vocab._set_special_token("eot", tokenizer.get_added_vocab()["<|user|>"])
        special_vocab._set_special_token("unk", tokenizer.get_added_vocab()["<|endoftext|>"])
        special_vocab._set_special_token("bos", tokenizer.get_added_vocab()["<|endoftext|>"])
        special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        rope_dim = self.hparams["head_dim"]
        self.gguf_writer.add_rope_dimension_count(int(rope_dim * self.hparams.get("partial_rotary_factor", 0.5)))
        if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]:
            if self.hparams["rope_scaling"].get("type") == "yarn":
                self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
                self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"])
                self.gguf_writer.add_rope_scaling_orig_ctx_len(self.hparams["rope_scaling"]["original_max_position_embeddings"])


@ModelBase.register("GlmForCausalLM", "ChatGLMModel", "ChatGLMForConditionalGeneration")
class ChatGLMModel(TextModel):
    model_arch = gguf.MODEL_ARCH.CHATGLM

    def set_vocab_chatglm3(self):
        dir_model = self.dir_model
        hparams = self.hparams
        tokens: list[bytes] = []
        toktypes: list[int] = []
        scores: list[float] = []

        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(dir_model, trust_remote_code=True)
        vocab_size = hparams.get("padded_vocab_size", len(tokenizer.get_vocab()))
        assert max(tokenizer.get_vocab().values()) < vocab_size
        role_special_tokens = ["<|system|>", "<|user|>", "<|assistant|>", "<|observation|>"]
        special_tokens = ["[MASK]", "[gMASK]", "[sMASK]", "sop", "eop"] + role_special_tokens
        for token_id in range(vocab_size):
            piece = tokenizer._convert_id_to_token(token_id)
            if token_id == 0:
                piece = "<unk>"
            elif token_id == 1:
                piece = "<bos>"
            elif token_id == 2:
                piece = "<eos>"

            text = piece.encode("utf-8")
            score = 0.0
            # Referencing the tokenizer Python implementation (https://huggingface.co/THUDM/chatglm3-6b/blob/main/tokenization_chatglm.py),
            # a score is only valid when the token id is less than tokenizer.tokenizer.sp_model.vocab_size()
            if len(piece) != 0 and token_id < tokenizer.tokenizer.sp_model.vocab_size():
                score = tokenizer.tokenizer.sp_model.get_score(token_id)

            if token_id >= tokenizer.tokenizer.sp_model.vocab_size():
                if piece in special_tokens:
                    toktype = SentencePieceTokenTypes.CONTROL
                elif len(piece) == 0:
                    text = f"[PAD{token_id}]".encode("utf-8")
                    toktype = SentencePieceTokenTypes.UNUSED
                else:
                    toktype = SentencePieceTokenTypes.USER_DEFINED
                tokens.append(text)
                scores.append(score)
                toktypes.append(toktype)
                continue

            toktype = SentencePieceTokenTypes.NORMAL
            if tokenizer.tokenizer.sp_model.is_unknown(token_id):
                toktype = SentencePieceTokenTypes.UNKNOWN
            elif tokenizer.tokenizer.sp_model.is_control(token_id):
                toktype = SentencePieceTokenTypes.CONTROL
            elif tokenizer.tokenizer.sp_model.is_unused(token_id):
                toktype = SentencePieceTokenTypes.UNUSED
            elif tokenizer.tokenizer.sp_model.is_byte(token_id):
                toktype = SentencePieceTokenTypes.BYTE

            tokens.append(text)
            scores.append(score)
            toktypes.append(toktype)

        self.gguf_writer.add_tokenizer_model("llama")
        # glm3 needs prefix and suffix formatted as:
        # prompt = "[gMASK]sop<|user|>\n" + prompt + "<|assistant|>"
        self.gguf_writer.add_tokenizer_pre("chatglm-spm")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

    @staticmethod
    def token_bytes_to_string(b):
        from transformers.models.gpt2.tokenization_gpt2 import bytes_to_unicode
        byte_encoder = bytes_to_unicode()
        return ''.join([byte_encoder[ord(char)] for char in b.decode('latin-1')])

    @staticmethod
    def bpe(mergeable_ranks: dict[bytes, int], token: bytes, max_rank: int | None = None) -> list[bytes]:
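        # greedy BPE merge loop: repeatedly merge the adjacent pair with the lowest
        # rank in mergeable_ranks; with max_rank given, stop before applying that
        # token's own merge, which recovers the two parts the token was built from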
        parts = [bytes([b]) for b in token]
        while True:
            min_idx = None
            min_rank = None
            for i, pair in enumerate(zip(parts[:-1], parts[1:])):
                rank = mergeable_ranks.get(pair[0] + pair[1])
                if rank is not None and (min_rank is None or rank < min_rank):
                    min_idx = i
                    min_rank = rank
            if min_rank is None or (max_rank is not None and min_rank >= max_rank):
                break
            assert min_idx is not None
            parts = parts[:min_idx] + [parts[min_idx] + parts[min_idx + 1]] + parts[min_idx + 2:]
        return parts

    def set_vocab(self):
        if "THUDM/chatglm3-6b" in self.hparams.get("_name_or_path", ""):
            self.set_vocab_chatglm3()
            return

        dir_model = self.dir_model
        hparams = self.hparams
        tokens: list[str] = []
        toktypes: list[int] = []

        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(dir_model, trust_remote_code=True)
        vocab_size = hparams.get("padded_vocab_size", hparams["vocab_size"])
        assert max(tokenizer.get_vocab().values()) < vocab_size

        tokens, toktypes, tokpre = self.get_vocab_base()
        self.gguf_writer.add_tokenizer_model("gpt2")
        self.gguf_writer.add_tokenizer_pre(tokpre)
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)
        special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=True)
        # only add special tokens when they were not already loaded from config.json
        special_vocab._set_special_token("eos", tokenizer.get_added_vocab()["<|endoftext|>"])
        special_vocab._set_special_token("eot", tokenizer.get_added_vocab()["<|user|>"])
        # this one is usually not in config.json anyway
        special_vocab._set_special_token("unk", tokenizer.get_added_vocab()["<|endoftext|>"])
        special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed"))
        n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads"))
        n_head_kv = self.hparams.get("multi_query_group_num", self.hparams.get("num_key_value_heads", n_head))
        self.gguf_writer.add_context_length(self.hparams.get("seq_length", n_embed))
        self.gguf_writer.add_embedding_length(n_embed)
        self.gguf_writer.add_feed_forward_length(self.hparams.get("ffn_hidden_size", self.hparams.get("intermediate_size", 4 * n_embed)))
        self.gguf_writer.add_block_count(self.hparams.get("num_layers", self.hparams["num_hidden_layers"]))
        self.gguf_writer.add_head_count(n_head)
        self.gguf_writer.add_head_count_kv(n_head_kv)
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams.get("layernorm_epsilon", 1e-5))
        self.gguf_writer.add_file_type(self.ftype)
        if "attention_dim" in self.hparams:
            rope_dim = self.hparams["attention_dim"]
        else:
            rope_dim = self.hparams["hidden_size"] // self.hparams["num_attention_heads"]
        self.gguf_writer.add_rope_dimension_count(int(rope_dim * self.hparams.get("partial_rotary_factor", 0.5)))
        self.gguf_writer.add_add_bos_token(False)
        rope_freq = 10000
        if "rope_ratio" in self.hparams:
            rope_freq = rope_freq * self.hparams["rope_ratio"]
        self.gguf_writer.add_rope_freq_base(rope_freq)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused
        if name.endswith(".rotary_pos_emb.inv_freq") or name.startswith("model.vision."):
            return []

        name = name.removeprefix("transformer.")
        return [(self.map_tensor_name(name), data_torch)]


@ModelBase.register("NemotronForCausalLM")
class NemotronModel(TextModel):
    model_arch = gguf.MODEL_ARCH.NEMOTRON

    def set_vocab(self):
        self._set_vocab_sentencepiece()
        self.gguf_writer.add_pad_token_id(0)
        self.gguf_writer.add_unk_token_id(1)

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        hparams = self.hparams
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])

        f_norm_eps = self.find_hparam(["layer_norm_eps", "layer_norm_epsilon", "norm_epsilon", "norm_eps"])
        self.gguf_writer.add_layer_norm_eps(f_norm_eps)

        # * Partial RoPE
        rot_pct = self.find_hparam(["partial_rotary_factor", "rope_pct", "rope_percent"])
        n_embd = self.find_hparam(["hidden_size", "n_embd"])
        n_head = self.find_hparam(["num_attention_heads", "n_head"])
        self.gguf_writer.add_rope_dimension_count(int(rot_pct * n_embd) // n_head)

        # * RopeScaling for Nemotron
        if "rope_scaling" not in self.hparams or self.hparams["rope_scaling"] is None:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)
        else:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
            self.gguf_writer.add_rope_scaling_factor(self.hparams["factor"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # * Adding +1 to LayerNorm's weights here to implement layernorm1p w/o changing anything on the GGML engine side
        # model.layers.{l}.input_layernorm.weight
        # model.layers.{l}.post_attention_layernorm.weight
        # model.norm.weight
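        # (layernorm1p computes y = x_norm * (1 + w) + b, so storing w + 1 lets the
        # runtime apply a plain LayerNorm kernel unchanged)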
        if name.endswith("norm.weight"):
            data_torch = data_torch + 1

        return [(self.map_tensor_name(name), data_torch)]


@ModelBase.register("ExaoneForCausalLM")
class ExaoneModel(TextModel):
    model_arch = gguf.MODEL_ARCH.EXAONE

    def set_gguf_parameters(self):
        hparams = self.hparams

        assert (hparams["activation_function"] == "silu")

        max_position_embeddings = hparams["max_position_embeddings"]
        embed_dim = hparams["hidden_size"]
        num_heads = hparams["num_attention_heads"]
        num_kv_heads = hparams.get("num_key_value_heads", num_heads)
        layer_norm_eps = hparams["layer_norm_epsilon"]
        intermediate_size = hparams["intermediate_size"] if "intermediate_size" in hparams else 4 * embed_dim
        num_layers = hparams["num_layers"]
        # ignore for now as EXAONE-3.0-7.8B-Instruct attention_dropout is 0.0
        # attention_dropout_rate = hparams["attention_dropout"]
        # ignore for now as EXAONE-3.0-7.8B-Instruct embed_dropout is 0.0
        # embed_dropout_rate = hparams["embed_dropout"]
        self.gguf_writer.add_embedding_length(embed_dim)
        self.gguf_writer.add_head_count(num_heads)
        self.gguf_writer.add_head_count_kv(num_kv_heads)
        self.gguf_writer.add_context_length(max_position_embeddings)
        self.gguf_writer.add_layer_norm_rms_eps(layer_norm_eps)
        self.gguf_writer.add_feed_forward_length(intermediate_size)
        self.gguf_writer.add_block_count(num_layers)
        self.gguf_writer.add_file_type(self.ftype)

        if (rope_theta := self.hparams.get("rope_theta")) is not None:
            self.gguf_writer.add_rope_freq_base(rope_theta)
        rotary_factor = self.find_hparam(["partial_rotary_factor", "rope_pct"], optional=True)
        rotary_factor = rotary_factor if rotary_factor is not None else 1.0
        self.gguf_writer.add_rope_dimension_count(int(rotary_factor * (hparams["hidden_size"] // hparams["num_attention_heads"])))
        if hparams.get("rope_scaling") is not None and "factor" in hparams["rope_scaling"]:
            if hparams["rope_scaling"].get("type") == "linear":
                self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
                self.gguf_writer.add_rope_scaling_factor(hparams["rope_scaling"]["factor"])

    def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
        if rope_scaling := self.find_hparam(["rope_scaling"], optional=True):
            if rope_scaling.get("rope_type", '').lower() == "llama3":
                base = self.hparams.get("rope_theta", 10000.0)
                dim = self.hparams.get("head_dim", self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
                freqs = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim))

                factor = rope_scaling.get("factor", 8.0)
                low_freq_factor = rope_scaling.get("low_freq_factor", 1.0)
                high_freq_factor = rope_scaling.get("high_freq_factor", 4.0)
                old_context_len = self.hparams.get("original_max_position_embeddings", 8192)

                low_freq_wavelen = old_context_len / low_freq_factor
                high_freq_wavelen = old_context_len / high_freq_factor
                assert low_freq_wavelen != high_freq_wavelen
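                # llama3-style NTK-by-parts scaling: frequencies with wavelength
                # shorter than high_freq_wavelen are kept as-is (factor 1), those
                # longer than low_freq_wavelen are slowed by `factor`, and the band
                # in between is smoothly interpolated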
                rope_factors = []
                for freq in freqs:
                    wavelen = 2 * math.pi / freq
                    if wavelen < high_freq_wavelen:
                        rope_factors.append(1)
                    elif wavelen > low_freq_wavelen:
                        rope_factors.append(factor)
                    else:
                        smooth = (old_context_len / wavelen - low_freq_factor) / (high_freq_factor - low_freq_factor)
                        rope_factors.append(1 / ((1 - smooth) / factor + smooth))

                yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FREQS), torch.tensor(rope_factors, dtype=torch.float32))


@ModelBase.register("GraniteForCausalLM")
class GraniteModel(LlamaModel):
    """Conversion for IBM's GraniteForCausalLM"""
    model_arch = gguf.MODEL_ARCH.GRANITE

    def set_gguf_parameters(self):
        """Granite uses standard llama parameters with the following differences:

        - No head_dim support
        - New multiplier params:
            - attention_scale
            - embedding_scale
            - residual_scale
            - logits_scaling
        """
        if head_dim := self.hparams.pop("head_dim", None):
            logger.warning("Ignoring head_dim (%s) from config for Granite", head_dim)
        super().set_gguf_parameters()

        # NOTE: Convert _multiplier params to _scale params for naming
        # consistency
        if attention_scale := self.hparams.get("attention_multiplier"):
            self.gguf_writer.add_attention_scale(attention_scale)
            logger.info("gguf: (granite) attention_scale = %s", attention_scale)
        if embedding_scale := self.hparams.get("embedding_multiplier"):
            self.gguf_writer.add_embedding_scale(embedding_scale)
            logger.info("gguf: (granite) embedding_scale = %s", embedding_scale)
        if residual_scale := self.hparams.get("residual_multiplier"):
            self.gguf_writer.add_residual_scale(residual_scale)
            logger.info("gguf: (granite) residual_scale = %s", residual_scale)
        if logits_scale := self.hparams.get("logits_scaling"):
            self.gguf_writer.add_logit_scale(logits_scale)
            logger.info("gguf: (granite) logits_scale = %s", logits_scale)


@ModelBase.register("GraniteMoeForCausalLM")
class GraniteMoeModel(GraniteModel):
    """Conversion for IBM's GraniteMoeForCausalLM"""
    model_arch = gguf.MODEL_ARCH.GRANITE_MOE

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        """In modeling_granitemoe, the JetMoe implementation of parallel experts
        is used. This essentially merges w1 and w3 into a single tensor with 2x
        the hidden size that is then split during forward. To keep compatibility
        with existing mixtral support, we pull them apart here.
        """
        if name.endswith("block_sparse_moe.input_linear.weight"):
            ffn_dim = self.hparams["intermediate_size"]
            assert data_torch.shape[-2] == 2 * ffn_dim, "Merged FFN tensor size must be 2 * intermediate_size"
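            # the merged tensor is roughly (n_experts, 2 * ffn_dim, n_embd); slicing
            # the second-to-last dim recovers the mixtral-style gate and up tensors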
            gate, up = data_torch[..., :ffn_dim, :], data_torch[..., ffn_dim:, :]
            return [
                (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE_EXP, bid), gate),
                (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP_EXP, bid), up),
            ]

        return super().modify_tensors(data_torch, name, bid)


@ModelBase.register("BailingMoeForCausalLM")
class BailingMoeModel(TextModel):
    model_arch = gguf.MODEL_ARCH.BAILINGMOE

    def set_vocab(self):
        self._set_vocab_gpt2()

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        hparams = self.hparams
        rope_dim = hparams.get("head_dim") or hparams["hidden_size"] // hparams["num_attention_heads"]

        self.gguf_writer.add_rope_dimension_count(rope_dim)
        if (self.hparams.get("rope_scaling") or {}).get("type") == "yarn" and "factor" in self.hparams["rope_scaling"]:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
            self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"])
            self.gguf_writer.add_rope_scaling_orig_ctx_len(self.hparams["rope_scaling"]["original_max_position_embeddings"])
        else:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)
        self.gguf_writer.add_leading_dense_block_count(hparams["first_k_dense_replace"])
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])
        self.gguf_writer.add_expert_feed_forward_length(hparams["moe_intermediate_size"])
        self.gguf_writer.add_expert_weights_scale(1.0)
        self.gguf_writer.add_expert_count(hparams["num_experts"])
        self.gguf_writer.add_expert_shared_count(hparams["num_shared_experts"])
        self.gguf_writer.add_expert_weights_norm(hparams["norm_topk_prob"])

    _experts: list[dict[str, Tensor]] | None = None

    @staticmethod
    def permute(weights: Tensor, n_head: int, n_head_kv: int | None):
        if n_head_kv is not None and n_head != n_head_kv:
            n_head = n_head_kv
        return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
                .swapaxes(1, 2)
                .reshape(weights.shape))

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        n_head = self.hparams["num_attention_heads"]
        n_kv_head = self.hparams.get("num_key_value_heads")
        n_embd = self.hparams["hidden_size"]
        head_dim = self.hparams.get("head_dim") or n_embd // n_head

        output_name = self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT)

        if name.endswith("attention.dense.weight"):
            return [(self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_OUT, bid), data_torch)]
        elif name.endswith("query_key_value.weight"):
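            # fused qkv: rows are laid out [Q | K | V] along dim -2, i.e. the full
            # tensor is (n_head * head_dim + 2 * n_kv_head * head_dim, n_embd)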
            q, k, v = data_torch.split([n_head * head_dim, n_kv_head * head_dim, n_kv_head * head_dim], dim=-2)

            return [
                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid), BailingMoeModel.permute(q, n_head, n_head)),
                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid), BailingMoeModel.permute(k, n_head, n_kv_head)),
                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid), v)
            ]
        elif name.find("mlp.experts") != -1:
            n_experts = self.hparams["num_experts"]
            assert bid is not None

            tensors: list[tuple[str, Tensor]] = []

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                # merge the experts into a single 3d tensor
                for w_name in ["down_proj", "gate_proj", "up_proj"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)

                    merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"

                    new_name = self.map_tensor_name(merged_name)

                    tensors.append((new_name, data_torch))

            return tensors

        new_name = self.map_tensor_name(name)

        if new_name == output_name and self.hparams.get("norm_head"):
            data_torch = data_torch.float()
            data_torch /= torch.norm(data_torch, p=2, dim=0, keepdim=True) + 1e-7

        return [(new_name, data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")


@ModelBase.register("ChameleonForConditionalGeneration")
@ModelBase.register("ChameleonForCausalLM")  # obsolete
class ChameleonModel(TextModel):
    model_arch = gguf.MODEL_ARCH.CHAMELEON

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_swin_norm(self.hparams.get("swin_norm", False))

    def set_vocab(self):
        self._set_vocab_gpt2()

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # ignore image tokenizer for now
        # TODO: remove this once image support is implemented for Chameleon
        if name.startswith("model.vqmodel"):
            return []

        n_head = self.hparams["num_attention_heads"]
        n_kv_head = self.hparams.get("num_key_value_heads")
        hidden_dim = self.hparams.get("hidden_size")

        if name.endswith(("q_proj.weight", "q_proj.bias")):
            data_torch = LlamaModel.permute(data_torch, n_head, n_head)
        if name.endswith(("k_proj.weight", "k_proj.bias")):
            data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)
        if name.endswith(("q_norm.weight", "q_norm.bias")):
            data_torch = ChameleonModel._reverse_hf_permute(data_torch, n_head, hidden_dim)
        if name.endswith(("k_norm.weight", "k_norm.bias")):
            data_torch = ChameleonModel._reverse_hf_permute(data_torch, n_kv_head, hidden_dim)

        return [(self.map_tensor_name(name), data_torch)]

    # see: https://github.com/huggingface/transformers/blob/72fb02c47dbbe1999ae105319f24631cad6e2e00/src/transformers/models/chameleon/convert_chameleon_weights_to_hf.py#L176-L203
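    # In short: the HF conversion stored each head's q/k norm weight in the
    # split-half rotary order; we take one head's (head_dim,) vector, re-interleave
    # it via the (2, head_dim // 2) transpose, and repeat it for every head.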
    @staticmethod
    def _reverse_hf_permute(data_torch, n_heads, hidden_dim):
        head_dim = hidden_dim // n_heads
        data_torch = data_torch[0].view(2, head_dim // 2).t().reshape(1, -1)
        data_torch = data_torch.repeat_interleave(n_heads, 0)
        return data_torch


###### CONVERSION LOGIC ######


# tree of lazy tensors
class LazyTorchTensor(gguf.LazyBase):
    _tensor_type = torch.Tensor

    # to keep the type-checker happy
    dtype: torch.dtype
    shape: torch.Size

    # only used when converting a torch.Tensor to a np.ndarray
    _dtype_map: dict[torch.dtype, type] = {
        torch.float16: np.float16,
        torch.float32: np.float32,
    }
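    # (numpy has no native bfloat16 or fp8 dtypes, so tensors outside this map
    # must be cast to f16/f32 before numpy() is called on them)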

    # used for safetensors slices
    # ref: https://github.com/huggingface/safetensors/blob/079781fd0dc455ba0fe851e2b4507c33d0c0d407/bindings/python/src/lib.rs#L1046
    # TODO: uncomment U64, U32, and U16, ref: https://github.com/pytorch/pytorch/issues/58734
    _dtype_str_map: dict[str, torch.dtype] = {
        "F64": torch.float64,
        "F32": torch.float32,
        "BF16": torch.bfloat16,
        "F16": torch.float16,
        # "U64": torch.uint64,
        "I64": torch.int64,
        # "U32": torch.uint32,
        "I32": torch.int32,
        # "U16": torch.uint16,
        "I16": torch.int16,
        "U8": torch.uint8,
        "I8": torch.int8,
        "BOOL": torch.bool,
        "F8_E4M3": torch.float8_e4m3fn,
        "F8_E5M2": torch.float8_e5m2,
    }

    def numpy(self) -> gguf.LazyNumpyTensor:
        dtype = self._dtype_map[self.dtype]
        return gguf.LazyNumpyTensor(
            meta=gguf.LazyNumpyTensor.meta_with_dtype_and_shape(dtype, self.shape),
            args=(self,),
            func=(lambda s: s.numpy())
        )

    @classmethod
    def meta_with_dtype_and_shape(cls, dtype: torch.dtype, shape: tuple[int, ...]) -> Tensor:
        return torch.empty(size=shape, dtype=dtype, device="meta")

    @classmethod
    def from_safetensors_slice(cls, st_slice: Any) -> Tensor:
        dtype = cls._dtype_str_map[st_slice.get_dtype()]
        shape: tuple[int, ...] = tuple(st_slice.get_shape())
        lazy = cls(meta=cls.meta_with_dtype_and_shape(dtype, shape), args=(st_slice,), func=lambda s: s[:])
        return cast(torch.Tensor, lazy)

    @classmethod
    def from_remote_tensor(cls, remote_tensor: gguf.utility.RemoteTensor):
        dtype = cls._dtype_str_map[remote_tensor.dtype]
        shape = remote_tensor.shape
        meta = cls.meta_with_dtype_and_shape(dtype, shape)
        lazy = cls(meta=meta, args=(remote_tensor,), func=lambda r: torch.frombuffer(r.data(), dtype=dtype).reshape(shape))
        return cast(torch.Tensor, lazy)

    @classmethod
    def __torch_function__(cls, func, types, args=(), kwargs=None):
        del types  # unused

        if kwargs is None:
            kwargs = {}

        if func is torch.Tensor.numpy:
            return args[0].numpy()

        return cls._wrap_fn(func)(*args, **kwargs)
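

# Rough sketch of how the lazy machinery above behaves (evaluation itself is
# implemented in gguf.LazyBase):
#
#   lazy = LazyTorchTensor.from_safetensors_slice(st_slice)  # no data read yet
#   lazy = lazy.float() / 2        # intercepted and recorded, still lazy
#   lazy.shape, lazy.dtype         # answered from the zero-copy "meta" tensor
#
# The underlying bytes are only read when the recorded chain of functions is
# finally evaluated, e.g. when the tensor is written to the GGUF file.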


def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(
        description="Convert a huggingface model to a GGML compatible file")
    parser.add_argument(
        "--vocab-only", action="store_true",
        help="extract only the vocab",
    )
    parser.add_argument(
        "--outfile", type=Path,
        help="path to write to; default: based on input. {ftype} will be replaced by the outtype.",
    )
    parser.add_argument(
        "--outtype", type=str, choices=["f32", "f16", "bf16", "q8_0", "tq1_0", "tq2_0", "auto"], default="f16",
        help="output format - use f32 for float32, f16 for float16, bf16 for bfloat16, q8_0 for Q8_0, tq1_0 or tq2_0 for ternary, and auto for the highest-fidelity 16-bit float type depending on the first loaded tensor type",
    )
    parser.add_argument(
        "--bigendian", action="store_true",
        help="model is executed on big endian machine",
    )
    parser.add_argument(
        "model", type=Path,
        help="directory containing model file",
        nargs="?",
    )
    parser.add_argument(
        "--use-temp-file", action="store_true",
        help="use the tempfile library while processing (helpful when running out of memory, process killed)",
    )
    parser.add_argument(
        "--no-lazy", action="store_true",
        help="use more RAM by computing all outputs before writing (use in case lazy evaluation is broken)",
    )
    parser.add_argument(
        "--model-name", type=str, default=None,
        help="name of the model",
    )
    parser.add_argument(
        "--verbose", action="store_true",
        help="increase output verbosity",
    )
    parser.add_argument(
        "--split-max-tensors", type=int, default=0,
        help="max tensors in each split",
    )
    parser.add_argument(
        "--split-max-size", type=str, default="0",
        help="max size per split N(M|G)",
    )
    parser.add_argument(
        "--dry-run", action="store_true",
        help="only print out a split plan and exit, without writing any new files",
    )
    parser.add_argument(
        "--no-tensor-first-split", action="store_true",
        help="do not add tensors to the first split (disabled by default)"
    )
    parser.add_argument(
        "--metadata", type=Path,
        help="Specify the path for an authorship metadata override file"
    )
    parser.add_argument(
        "--print-supported-models", action="store_true",
        help="Print the supported models"
    )
    parser.add_argument(
        "--remote", action="store_true",
        help="(Experimental) Read safetensors file remotely without downloading to disk. Config and tokenizer files will still be downloaded. To use this feature, you need to specify Hugging Face model repo name instead of a local directory. For example: 'HuggingFaceTB/SmolLM2-1.7B-Instruct'. Note: To access gated repo, set HF_TOKEN environment variable to your Hugging Face token.",
    )
    parser.add_argument(
        "--mmproj", action="store_true",
        help="(Experimental) Export multimodal projector (mmproj) for vision models. This will only work on some vision models. A prefix 'mmproj-' will be added to the output file name.",
    )

    args = parser.parse_args()
    if not args.print_supported_models and args.model is None:
        parser.error("the following arguments are required: model")
    return args
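

# Typical invocations (paths and repo names are illustrative):
#
#   python convert_hf_to_gguf.py ./my-model-dir --outtype q8_0
#   python convert_hf_to_gguf.py HuggingFaceTB/SmolLM2-1.7B-Instruct --remote \
#       --outfile smollm2-{ftype}.gguf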


def split_str_to_n_bytes(split_str: str) -> int:
    if split_str.endswith("K"):
        n = int(split_str[:-1]) * 1000
    elif split_str.endswith("M"):
        n = int(split_str[:-1]) * 1000 * 1000
    elif split_str.endswith("G"):
        n = int(split_str[:-1]) * 1000 * 1000 * 1000
    elif split_str.isnumeric():
        n = int(split_str)
    else:
        raise ValueError(f"Invalid split size: {split_str}, must be a number, optionally followed by K, M, or G")

    if n < 0:
        raise ValueError(f"Invalid split size: {split_str}, must be non-negative")

    return n
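
# e.g. split_str_to_n_bytes("300")  == 300
#      split_str_to_n_bytes("500M") == 500_000_000
#      split_str_to_n_bytes("2G")   == 2_000_000_000  (decimal multiples, not 1024-based)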


def get_model_architecture(hparams: dict[str, Any], model_type: ModelType) -> str:
    text_config = hparams.get("text_config", {})
    vision_config = hparams.get("vision_config", {})
    arch = hparams["architectures"][0]

    # if "architectures" is found in the sub-config, use that instead
    if model_type == ModelType.TEXT and text_config.get("architectures") is not None:
        arch = text_config["architectures"][0]
    elif model_type == ModelType.VISION and vision_config.get("architectures") is not None:
        arch = vision_config["architectures"][0]
    return arch
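

# e.g. for a hypothetical multimodal config
#   {"architectures": ["SomeVLForConditionalGeneration"],
#    "text_config": {"architectures": ["LlamaForCausalLM"]}}
# ModelType.TEXT resolves to "LlamaForCausalLM", while ModelType.VISION falls
# back to the top-level architecture since vision_config has no "architectures".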


def main() -> None:
    args = parse_args()

    if args.print_supported_models:
        logger.error("Supported models:")
        ModelBase.print_registered_models()
        sys.exit(0)

    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)

    dir_model = args.model

    if args.remote:
        from huggingface_hub import snapshot_download
        local_dir = snapshot_download(
            repo_id=str(dir_model),
            allow_patterns=["LICENSE", "*.json", "*.md", "*.txt", "tokenizer.model"])
        dir_model = Path(local_dir)
        logger.info(f"Downloaded config and tokenizer to {local_dir}")

    if not dir_model.is_dir():
        logger.error(f'Error: {args.model} is not a directory')
        sys.exit(1)

    ftype_map: dict[str, gguf.LlamaFileType] = {
        "f32": gguf.LlamaFileType.ALL_F32,
        "f16": gguf.LlamaFileType.MOSTLY_F16,
        "bf16": gguf.LlamaFileType.MOSTLY_BF16,
        "q8_0": gguf.LlamaFileType.MOSTLY_Q8_0,
        "tq1_0": gguf.LlamaFileType.MOSTLY_TQ1_0,
        "tq2_0": gguf.LlamaFileType.MOSTLY_TQ2_0,
        "auto": gguf.LlamaFileType.GUESSED,
    }

    is_split = args.split_max_tensors > 0 or args.split_max_size != "0"
    if args.use_temp_file and is_split:
        logger.error("Error: Cannot use temp file when splitting")
        sys.exit(1)

    if args.outfile is not None:
        fname_out = args.outfile
    elif args.remote:
        # if remote, use the model ID as the output file name
        fname_out = Path("./" + str(args.model).replace("/", "-") + "-{ftype}.gguf")
    else:
        fname_out = dir_model

    logger.info(f"Loading model: {dir_model.name}")

    if args.mmproj:
        if "mmproj" not in fname_out.name:
            fname_out = ModelBase.add_prefix_to_filename(fname_out, "mmproj-")

    with torch.inference_mode():
        output_type = ftype_map[args.outtype]
        model_type = ModelType.VISION if args.mmproj else ModelType.TEXT
        hparams = ModelBase.load_hparams(dir_model)
        model_architecture = get_model_architecture(hparams, model_type)
        logger.info(f"Model architecture: {model_architecture}")

        try:
            model_class = ModelBase.from_model_architecture(model_architecture, model_type=model_type)
        except NotImplementedError:
            logger.error(f"Model {model_architecture} is not supported")
            sys.exit(1)

        model_instance = model_class(dir_model, output_type, fname_out,
                                     is_big_endian=args.bigendian, use_temp_file=args.use_temp_file,
                                     eager=args.no_lazy,
                                     metadata_override=args.metadata, model_name=args.model_name,
                                     split_max_tensors=args.split_max_tensors,
                                     split_max_size=split_str_to_n_bytes(args.split_max_size), dry_run=args.dry_run,
                                     small_first_shard=args.no_tensor_first_split,
                                     remote_hf_model_id=str(args.model) if args.remote else None)

        if args.vocab_only:
            logger.info("Exporting model vocab...")
            model_instance.write_vocab()
            logger.info(f"Model vocab successfully exported to {model_instance.fname_out}")
        else:
            logger.info("Exporting model...")
            model_instance.write()
            out_path = f"{model_instance.fname_out.parent}{os.sep}" if is_split else model_instance.fname_out
            logger.info(f"Model successfully exported to {out_path}")


if __name__ == '__main__':
    main()