#!/usr/bin/env python3
# -*- coding: utf-8 -*-

from __future__ import annotations

import ast
import logging
import argparse
import contextlib
import json
import os
import re
import sys

from enum import IntEnum
from pathlib import Path
from hashlib import sha256
from typing import TYPE_CHECKING, Any, Callable, ContextManager, Iterable, Iterator, Literal, Sequence, TypeVar, cast
from itertools import chain
from transformers import AutoConfig

import math
import numpy as np
import torch

if TYPE_CHECKING:
    from torch import Tensor

if 'NO_LOCAL_GGUF' not in os.environ:
    sys.path.insert(1, str(Path(__file__).parent / 'gguf-py'))
import gguf
from gguf.vocab import MistralTokenizerType, MistralVocab

from mistral_common.tokens.tokenizers.base import TokenizerVersion
from mistral_common.tokens.tokenizers.multimodal import DATASET_MEAN, DATASET_STD
from mistral_common.tokens.tokenizers.tekken import Tekkenizer
from mistral_common.tokens.tokenizers.sentencepiece import (
    SentencePieceTokenizer,
)

logger = logging.getLogger("hf-to-gguf")

###### MODEL DEFINITIONS ######

class SentencePieceTokenTypes(IntEnum):
    NORMAL = 1
    UNKNOWN = 2
    CONTROL = 3
    USER_DEFINED = 4
    UNUSED = 5
    BYTE = 6


class ModelType(IntEnum):
    TEXT = 1
    MMPROJ = 2


AnyModel = TypeVar("AnyModel", bound="type[ModelBase]")
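
# ModelBase is the common base class for every converter in this script.
# Concrete subclasses register themselves for one or more Hugging Face
# architecture names via @ModelBase.register(...) and are looked up with
# from_model_architecture().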
class ModelBase:
    _model_classes: dict[ModelType, dict[str, type[ModelBase]]] = {
        ModelType.TEXT: {},
        ModelType.MMPROJ: {},
    }

    dir_model: Path
    ftype: gguf.LlamaFileType
    fname_out: Path
    is_big_endian: bool
    endianess: gguf.GGUFEndian
    use_temp_file: bool
    lazy: bool
    dry_run: bool
    part_names: list[str]
    is_safetensors: bool
    hparams: dict[str, Any]
    tensor_names: set[str] | None
    gguf_writer: gguf.GGUFWriter
    model_name: str | None
    metadata_override: Path | None
    dir_model_card: Path
    remote_hf_model_id: str | None

    # subclasses should define this!
    model_arch: gguf.MODEL_ARCH

    # subclasses should initialize this!
    block_count: int
    tensor_map: gguf.TensorNameMap

    # Mistral format specifics
    is_mistral_format: bool = False
    disable_mistral_community_chat_template: bool = False

    def __init__(self, dir_model: Path, ftype: gguf.LlamaFileType, fname_out: Path, *, is_big_endian: bool = False,
                 use_temp_file: bool = False, eager: bool = False,
                 metadata_override: Path | None = None, model_name: str | None = None,
                 split_max_tensors: int = 0, split_max_size: int = 0, dry_run: bool = False,
                 small_first_shard: bool = False, hparams: dict[str, Any] | None = None, remote_hf_model_id: str | None = None,
                 disable_mistral_community_chat_template: bool = False):
        if type(self) is ModelBase or \
                type(self) is TextModel or \
                type(self) is MmprojModel:
            raise TypeError(f"{type(self).__name__!r} should not be directly instantiated")

        self.dir_model = dir_model
        self.ftype = ftype
        self.fname_out = fname_out
        self.is_big_endian = is_big_endian
        self.endianess = gguf.GGUFEndian.BIG if is_big_endian else gguf.GGUFEndian.LITTLE
        self.use_temp_file = use_temp_file
        self.lazy = not eager or (remote_hf_model_id is not None)
        self.dry_run = dry_run
        self.remote_hf_model_id = remote_hf_model_id
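
        # When a remote HF model id is given, tensors are listed and streamed
        # over HTTP via gguf.utility.SafetensorRemote instead of read from
        # local part files; each one is wrapped as a lazy tensor so its data is
        # only fetched when it is actually converted and written.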
        if remote_hf_model_id is not None:
            self.is_safetensors = True

            def get_remote_tensors() -> Iterator[tuple[str, Tensor]]:
                logger.info(f"Using remote model with HuggingFace id: {remote_hf_model_id}")
                remote_tensors = gguf.utility.SafetensorRemote.get_list_tensors_hf_model(remote_hf_model_id)
                self.tensor_names = set(name for name in remote_tensors.keys())
                for name, remote_tensor in remote_tensors.items():
                    yield (name, LazyTorchTensor.from_remote_tensor(remote_tensor))

            self.get_tensors = get_remote_tensors
        else:
            prefix = "model" if not self.is_mistral_format else "consolidated"
            self.part_names = ModelBase.get_model_part_names(self.dir_model, prefix, ".safetensors")
            self.is_safetensors = len(self.part_names) > 0
            if not self.is_safetensors:
                self.part_names = ModelBase.get_model_part_names(self.dir_model, "pytorch_model", ".bin")

        self.hparams = ModelBase.load_hparams(self.dir_model, self.is_mistral_format) if hparams is None else hparams
        self.tensor_names = None
        self.metadata_override = metadata_override
        self.model_name = model_name
        self.dir_model_card = dir_model  # overridden in convert_lora_to_gguf.py

        # Apply heuristics to figure out typical tensor encoding based on first layer tensor encoding type
        if self.ftype == gguf.LlamaFileType.GUESSED:
            # NOTE: can't use field "torch_dtype" in config.json, because some finetunes lie.
            _, first_tensor = next(self.get_tensors())
            if first_tensor.dtype == torch.float16:
                logger.info(f"choosing --outtype f16 from first tensor type ({first_tensor.dtype})")
                self.ftype = gguf.LlamaFileType.MOSTLY_F16
            else:
                logger.info(f"choosing --outtype bf16 from first tensor type ({first_tensor.dtype})")
                self.ftype = gguf.LlamaFileType.MOSTLY_BF16

        # Configure GGUF Writer
        self.gguf_writer = gguf.GGUFWriter(path=None, arch=gguf.MODEL_ARCH_NAMES[self.model_arch], endianess=self.endianess, use_temp_file=self.use_temp_file,
                                           split_max_tensors=split_max_tensors, split_max_size=split_max_size, dry_run=dry_run, small_first_shard=small_first_shard)

        # Mistral specific
        self.disable_mistral_community_chat_template = disable_mistral_community_chat_template

    @classmethod
    def add_prefix_to_filename(cls, path: Path, prefix: str) -> Path:
        stem, suffix = path.stem, path.suffix
        new_name = f"{prefix}{stem}{suffix}"
        return path.with_name(new_name)

    def find_hparam(self, keys: Iterable[str], optional: bool = False) -> Any:
        key = next((k for k in keys if k in self.hparams), None)
        if key is not None:
            return self.hparams[key]
        if optional:
            return None
        raise KeyError(f"could not find any of: {keys}")
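
    # Yield (name, tensor) pairs from all local model parts; when an
    # *.index.json file is present, its weight_map is used afterwards to detect
    # missing or extra tensors (and thus missing part files).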
    def get_tensors(self) -> Iterator[tuple[str, Tensor]]:
        tensor_names_from_parts: set[str] = set()

        if not self.is_mistral_format:
            index_name = "model.safetensors" if self.is_safetensors else "pytorch_model.bin"
            index_name += ".index.json"
            index_file = self.dir_model / index_name

            if index_file.is_file():
                self.tensor_names = set()
                logger.info(f"gguf: loading model weight map from '{index_name}'")
                with open(index_file, "r", encoding="utf-8") as f:
                    index: dict[str, Any] = json.load(f)
                    weight_map = index.get("weight_map")
                    if weight_map is None or not isinstance(weight_map, dict):
                        raise ValueError(f"Can't load 'weight_map' from {index_name!r}")
                    self.tensor_names.update(weight_map.keys())
            else:
                self.tensor_names = tensor_names_from_parts
                weight_map = {}
        else:
            self.tensor_names = tensor_names_from_parts
            weight_map = {}

        for part_name in self.part_names:
            logger.info(f"gguf: loading model part '{part_name}'")
            ctx: ContextManager[Any]
            if self.is_safetensors:
                from safetensors import safe_open
                ctx = cast(ContextManager[Any], safe_open(self.dir_model / part_name, framework="pt", device="cpu"))
            else:
                ctx = contextlib.nullcontext(torch.load(str(self.dir_model / part_name), map_location="cpu", mmap=True, weights_only=True))

            with ctx as model_part:
                tensor_names_from_parts.update(model_part.keys())

                for name in model_part.keys():
                    if self.is_safetensors:
                        if self.lazy:
                            data = model_part.get_slice(name)
                            data = LazyTorchTensor.from_safetensors_slice(data)
                        else:
                            data = model_part.get_tensor(name)
                    else:
                        data = model_part[name]
                        if self.lazy:
                            data = LazyTorchTensor.from_eager(data)
                    yield name, data

        # verify tensor name presence and identify potentially missing files
        if len(tensor_names_from_parts.symmetric_difference(self.tensor_names)) > 0:
            missing = sorted(self.tensor_names.difference(tensor_names_from_parts))
            extra = sorted(tensor_names_from_parts.difference(self.tensor_names))
            missing_files = sorted(set(weight_map[n] for n in missing if n in weight_map))
            if len(extra) == 0 and len(missing_files) > 0:
                raise ValueError(f"Missing or incomplete model files: {missing_files}\n"
                                 f"Missing tensors: {missing}")
            else:
                raise ValueError("Mismatch between weight map and model parts for tensor names:\n"
                                 f"Missing tensors: {missing}\n"
                                 f"Extra tensors: {extra}")

    def format_tensor_name(self, key: gguf.MODEL_TENSOR, bid: int | None = None, suffix: str = ".weight") -> str:
        if key not in gguf.MODEL_TENSORS[self.model_arch]:
            raise ValueError(f"Missing {key!r} for MODEL_TENSORS of {self.model_arch!r}")
        name: str = gguf.TENSOR_NAMES[key]
        if "{bid}" in name:
            assert bid is not None
            name = name.format(bid=bid)
        return name + suffix

    def match_model_tensor_name(self, name: str, key: gguf.MODEL_TENSOR, bid: int | None, suffix: str = ".weight") -> bool:
        if key not in gguf.MODEL_TENSORS[self.model_arch]:
            return False
        key_name: str = gguf.TENSOR_NAMES[key]
        if "{bid}" in key_name:
            if bid is None:
                return False
            key_name = key_name.format(bid=bid)
        else:
            if bid is not None:
                return False
        return name == (key_name + suffix)

    def map_tensor_name(self, name: str, try_suffixes: Sequence[str] = (".weight", ".bias")) -> str:
        new_name = self.tensor_map.get_name(key=name, try_suffixes=try_suffixes)
        if new_name is None:
            raise ValueError(f"Can not map tensor {name!r}")
        return new_name

    def set_gguf_parameters(self):
        raise NotImplementedError("set_gguf_parameters() must be implemented in subclasses")
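
    # Per-model hook: subclasses override this to rename, split, transform or
    # drop tensors; the default implementation only maps the HF tensor name to
    # its GGUF name.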
    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused
        return [(self.map_tensor_name(name), data_torch)]

    def tensor_force_quant(self, name: str, new_name: str, bid: int | None, n_dims: int) -> gguf.GGMLQuantizationType | bool:
        del name, new_name, bid, n_dims  # unused
        return False

    # some models need extra generated tensors (like rope_freqs)
    def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
        return ()
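
    # Conversion loop: stream every tensor through modify_tensors(), choose an
    # output quantization type, quantize via gguf.quants and add the result to
    # the GGUF writer.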
    def prepare_tensors(self):
        max_name_len = max(len(s) for _, s in self.tensor_map.mapping.values()) + len(".weight,")

        for name, data_torch in chain(self.generate_extra_tensors(), self.get_tensors()):
            # we don't need these
            if name.endswith((".attention.masked_bias", ".attention.bias", ".rotary_emb.inv_freq")):
                continue

            old_dtype = data_torch.dtype

            # convert any unsupported data types to float32
            if data_torch.dtype not in (torch.float16, torch.float32):
                data_torch = data_torch.to(torch.float32)

            # use the first number-like part of the tensor name as the block id
            bid = None
            for part in name.split("."):
                if part.isdecimal():
                    bid = int(part)
                    break

            for new_name, data_torch in (self.modify_tensors(data_torch, name, bid)):
                # TODO: why do we squeeze here?
                # data = data_torch.squeeze().numpy()
                data = data_torch.numpy()

                n_dims = len(data.shape)
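
                # tensor_force_quant() is tri-state: False = no override,
                # True = quantize according to the target ftype below, or an
                # explicit GGMLQuantizationType forced for this tensor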
                data_qtype: gguf.GGMLQuantizationType | bool = self.tensor_force_quant(name, new_name, bid, n_dims)

                # Most of the codebase that takes in 1D tensors or norms only handles F32 tensors
                if n_dims <= 1 or new_name.endswith("_norm.weight"):
                    data_qtype = gguf.GGMLQuantizationType.F32

                # Conditions should closely match those in llama_model_quantize_internal in llama.cpp
                # Some tensor types are always in float32
                if data_qtype is False and (
                    any(
                        self.match_model_tensor_name(new_name, key, bid)
                        for key in (
                            gguf.MODEL_TENSOR.FFN_GATE_INP,
                            gguf.MODEL_TENSOR.POS_EMBD,
                            gguf.MODEL_TENSOR.TOKEN_TYPES,
                            gguf.MODEL_TENSOR.SSM_CONV1D,
                            gguf.MODEL_TENSOR.SHORTCONV_CONV,
                            gguf.MODEL_TENSOR.TIME_MIX_FIRST,
                            gguf.MODEL_TENSOR.TIME_MIX_W1,
                            gguf.MODEL_TENSOR.TIME_MIX_W2,
                            gguf.MODEL_TENSOR.TIME_MIX_DECAY_W1,
                            gguf.MODEL_TENSOR.TIME_MIX_DECAY_W2,
                            gguf.MODEL_TENSOR.TIME_MIX_LERP_FUSED,
                            gguf.MODEL_TENSOR.POSNET_NORM1,
                            gguf.MODEL_TENSOR.POSNET_NORM2,
                            gguf.MODEL_TENSOR.V_ENC_EMBD_POS,
                            gguf.MODEL_TENSOR.A_ENC_EMBD_POS,
                            gguf.MODEL_TENSOR.ALTUP_CORRECT_COEF,
                            gguf.MODEL_TENSOR.ALTUP_PREDICT_COEF,
                        )
                    )
                    or not new_name.endswith(".weight")
                ):
                    data_qtype = gguf.GGMLQuantizationType.F32

                if data_qtype is False and any(
                    self.match_model_tensor_name(new_name, key, bid)
                    for key in (
                        gguf.MODEL_TENSOR.TOKEN_EMBD,
                        gguf.MODEL_TENSOR.PER_LAYER_TOKEN_EMBD,
                        gguf.MODEL_TENSOR.OUTPUT,
                        gguf.MODEL_TENSOR.ALTUP_ROUTER,
                        gguf.MODEL_TENSOR.LAUREL_L,
                        gguf.MODEL_TENSOR.LAUREL_R,
                    )
                ):
                    if self.ftype in (
                        gguf.LlamaFileType.MOSTLY_TQ1_0,
                        gguf.LlamaFileType.MOSTLY_TQ2_0,
                    ):
                        # TODO: use Q4_K and Q6_K
                        data_qtype = gguf.GGMLQuantizationType.F16

                # No override (data_qtype is False), or wants to be quantized (data_qtype is True)
                if isinstance(data_qtype, bool):
                    if self.ftype == gguf.LlamaFileType.ALL_F32:
                        data_qtype = gguf.GGMLQuantizationType.F32
                    elif self.ftype == gguf.LlamaFileType.MOSTLY_F16:
                        data_qtype = gguf.GGMLQuantizationType.F16
                    elif self.ftype == gguf.LlamaFileType.MOSTLY_BF16:
                        data_qtype = gguf.GGMLQuantizationType.BF16
                    elif self.ftype == gguf.LlamaFileType.MOSTLY_Q8_0:
                        data_qtype = gguf.GGMLQuantizationType.Q8_0
                    elif self.ftype == gguf.LlamaFileType.MOSTLY_TQ1_0:
                        data_qtype = gguf.GGMLQuantizationType.TQ1_0
                    elif self.ftype == gguf.LlamaFileType.MOSTLY_TQ2_0:
                        data_qtype = gguf.GGMLQuantizationType.TQ2_0
                    else:
                        raise ValueError(f"Unknown file type: {self.ftype.name}")

                try:
                    data = gguf.quants.quantize(data, data_qtype)
                except gguf.QuantError as e:
                    logger.warning("%s, %s", e, "falling back to F16")
                    data_qtype = gguf.GGMLQuantizationType.F16
                    data = gguf.quants.quantize(data, data_qtype)

                shape = gguf.quant_shape_from_byte_shape(data.shape, data_qtype) if data.dtype == np.uint8 else data.shape

                # reverse shape to make it similar to the internal ggml dimension order
                shape_str = f"{{{', '.join(str(n) for n in reversed(shape))}}}"

                # n_dims is implicit in the shape
                logger.info(f"{f'%-{max_name_len}s' % f'{new_name},'} {old_dtype} --> {data_qtype.name}, shape = {shape_str}")

                self.gguf_writer.add_tensor(new_name, data, raw_dtype=data_qtype)

    def set_type(self):
        self.gguf_writer.add_type(gguf.GGUFType.MODEL)

    def prepare_metadata(self, vocab_only: bool):
        total_params, shared_params, expert_params, expert_count = self.gguf_writer.get_total_parameter_count()

        self.metadata = gguf.Metadata.load(self.metadata_override, self.dir_model_card, self.model_name, total_params)

        # If we are using an HF model id, set the metadata name to the model id
        if self.remote_hf_model_id:
            self.metadata.name = self.remote_hf_model_id

        # Fall back to the model directory name if the metadata name is still missing
        if self.metadata.name is None:
            self.metadata.name = self.dir_model.name

        # Generate parameter weight class (useful for leaderboards) if not yet determined
        if self.metadata.size_label is None and total_params > 0:
            self.metadata.size_label = gguf.size_label(total_params, shared_params, expert_params, expert_count)

        self.set_type()

        logger.info("Set meta model")
        self.metadata.set_gguf_meta_model(self.gguf_writer)

        logger.info("Set model parameters")
        self.set_gguf_parameters()

        logger.info("Set model quantization version")
        self.gguf_writer.add_quantization_version(gguf.GGML_QUANT_VERSION)

    def write_vocab(self):
        raise NotImplementedError("write_vocab() must be implemented in subclasses")

    def write(self):
        self.prepare_tensors()
        self.prepare_metadata(vocab_only=False)
        self.gguf_writer.write_header_to_file(path=self.fname_out)
        self.gguf_writer.write_kv_data_to_file()
        self.gguf_writer.write_tensors_to_file(progress=True)
        self.gguf_writer.close()

    @staticmethod
    def get_model_part_names(dir_model: Path, prefix: str, suffix: str) -> list[str]:
        part_names: list[str] = []
        for filename in os.listdir(dir_model):
            if filename.startswith(prefix) and filename.endswith(suffix):
                part_names.append(filename)

        part_names.sort()
        return part_names
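
    # Load hyperparameters either from params.json (Mistral format) or through
    # transformers.AutoConfig, falling back to a raw config.json parse when the
    # model requires remote code.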
    @staticmethod
    def load_hparams(dir_model: Path, is_mistral_format: bool):
        if is_mistral_format:
            with open(dir_model / "params.json", "r", encoding="utf-8") as f:
                config = json.load(f)
            return config

        try:
            # for security reasons, we don't allow loading remote code by default
            # if a model needs remote code, we fall back to config.json
            config = AutoConfig.from_pretrained(dir_model, trust_remote_code=False).to_dict()
        except Exception as e:
            logger.warning(f"Failed to load model config from {dir_model}: {e}")
            logger.warning("Trying to load config.json instead")
            with open(dir_model / "config.json", "r", encoding="utf-8") as f:
                config = json.load(f)

        if "llm_config" in config:
            # rename for InternVL
            config["text_config"] = config["llm_config"]
        if "thinker_config" in config:
            # rename for Qwen2.5-Omni
            config["text_config"] = config["thinker_config"]["text_config"]
        return config
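
    # Class decorator used by the model definitions in this file to register
    # themselves under one or more HF architecture names, e.g. (illustrative):
    #
    #   @ModelBase.register("LlamaForCausalLM")
    #   class LlamaModel(TextModel):
    #       model_arch = gguf.MODEL_ARCH.LLAMA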
    @classmethod
    def register(cls, *names: str) -> Callable[[AnyModel], AnyModel]:
        assert names

        def func(modelcls: AnyModel) -> AnyModel:
            model_type = ModelType.MMPROJ if modelcls.model_arch == gguf.MODEL_ARCH.MMPROJ else ModelType.TEXT
            for name in names:
                cls._model_classes[model_type][name] = modelcls
            return modelcls
        return func

    @classmethod
    def print_registered_models(cls):
        for model_type, model_classes in cls._model_classes.items():
            logger.error(f"{model_type.name} models:")
            for name in sorted(model_classes.keys()):
                logger.error(f" - {name}")

    @classmethod
    def from_model_architecture(cls, arch: str, model_type=ModelType.TEXT) -> type[ModelBase]:
        try:
            return cls._model_classes[model_type][arch]
        except KeyError:
            raise NotImplementedError(f'Architecture {arch!r} not supported!') from None


class TextModel(ModelBase):
    model_type = ModelType.TEXT
    hf_arch: str

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if not self.is_mistral_format:
            self.hf_arch = get_model_architecture(self.hparams, self.model_type)
        else:
            self.hf_arch = ""

        if "text_config" in self.hparams:
            # move the text_config to the root level
            self.hparams = {**self.hparams, **self.hparams["text_config"]}

        self.block_count = self.find_hparam(["n_layers", "num_hidden_layers", "n_layer", "num_layers"])
        self.tensor_map = gguf.get_tensor_name_map(self.model_arch, self.block_count)

    @classmethod
    def __init_subclass__(cls):
        # can't use an abstract property, because overriding it without type errors
        # would require using decorated functions instead of simply defining the property
        if "model_arch" not in cls.__dict__:
            raise TypeError(f"Missing property 'model_arch' for {cls.__name__!r}")
  432. def set_vocab(self):
  433. self._set_vocab_gpt2()
  434. def prepare_metadata(self, vocab_only: bool):
  435. super().prepare_metadata(vocab_only=vocab_only)
  436. total_params = self.gguf_writer.get_total_parameter_count()[0]
  437. # Extract the encoding scheme from the file type name. e.g. 'gguf.LlamaFileType.MOSTLY_Q8_0' --> 'Q8_0'
  438. output_type: str = self.ftype.name.partition("_")[2]
  439. # Filename Output
  440. if self.fname_out.is_dir():
  441. # Generate default filename based on model specification and available metadata
  442. if not vocab_only:
  443. fname_default: str = gguf.naming_convention(self.metadata.name, self.metadata.basename, self.metadata.finetune, self.metadata.version, self.metadata.size_label, output_type, model_type="LoRA" if total_params < 0 else None)
  444. else:
  445. fname_default: str = gguf.naming_convention(self.metadata.name, self.metadata.basename, self.metadata.finetune, self.metadata.version, size_label=None, output_type=None, model_type="vocab")
  446. # Use the default filename
  447. self.fname_out = self.fname_out / f"{fname_default}.gguf"
  448. else:
  449. # Output path is a custom defined templated filename
  450. # Note: `not is_dir()` is used because `.is_file()` will not detect
  451. # file template strings as it doesn't actually exist as a file
  452. # Process templated file name with the output ftype, useful with the "auto" ftype
  453. self.fname_out = self.fname_out.parent / gguf.fill_templated_filename(self.fname_out.name, output_type)
  454. logger.info("Set model tokenizer")
  455. self.set_vocab()
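
    # Illustrative note (the exact output of gguf.naming_convention is an
    # assumption here): for metadata like name="Mistral-7B-Instruct",
    # size_label="7B" and output_type="Q8_0", the generated default filename
    # would look roughly like "Mistral-7B-Instruct-Q8_0.gguf", with fields
    # omitted when the corresponding metadata is missing.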

    def set_gguf_parameters(self):
        self.gguf_writer.add_block_count(self.block_count)

        if (n_ctx := self.find_hparam(["max_position_embeddings", "n_ctx", "n_positions", "max_length"], optional=True)) is not None:
            self.gguf_writer.add_context_length(n_ctx)
            logger.info(f"gguf: context length = {n_ctx}")

        if (n_embd := self.find_hparam(["hidden_size", "n_embd", "dim"], optional=True)) is not None:
            self.gguf_writer.add_embedding_length(n_embd)
            logger.info(f"gguf: embedding length = {n_embd}")

        if (n_ff := self.find_hparam(["intermediate_size", "n_inner", "hidden_dim"], optional=True)) is not None:
            self.gguf_writer.add_feed_forward_length(n_ff)
            logger.info(f"gguf: feed forward length = {n_ff}")

        if (n_head := self.find_hparam(["num_attention_heads", "n_head", "n_heads"], optional=True)) is not None:
            self.gguf_writer.add_head_count(n_head)
            logger.info(f"gguf: head count = {n_head}")

        if (n_head_kv := self.find_hparam(["num_key_value_heads", "n_kv_heads"], optional=True)) is not None:
            self.gguf_writer.add_head_count_kv(n_head_kv)
            logger.info(f"gguf: key-value head count = {n_head_kv}")

        if (rope_theta := self.hparams.get("rope_theta")) is not None:
            self.gguf_writer.add_rope_freq_base(rope_theta)
            logger.info(f"gguf: rope theta = {rope_theta}")

        if (f_rms_eps := self.find_hparam(["rms_norm_eps", "norm_eps"], optional=True)) is not None:
            self.gguf_writer.add_layer_norm_rms_eps(f_rms_eps)
            logger.info(f"gguf: rms norm epsilon = {f_rms_eps}")

        if (f_norm_eps := self.find_hparam(["layer_norm_eps", "layer_norm_epsilon", "norm_epsilon"], optional=True)) is not None:
            self.gguf_writer.add_layer_norm_eps(f_norm_eps)
            logger.info(f"gguf: layer norm epsilon = {f_norm_eps}")

        if (n_experts := self.hparams.get("num_local_experts")) is not None:
            self.gguf_writer.add_expert_count(n_experts)
            logger.info(f"gguf: expert count = {n_experts}")

        if (n_experts_used := self.hparams.get("num_experts_per_tok")) is not None:
            self.gguf_writer.add_expert_used_count(n_experts_used)
            logger.info(f"gguf: experts used count = {n_experts_used}")

        if (head_dim := self.hparams.get("head_dim")) is not None:
            self.gguf_writer.add_key_length(head_dim)
            self.gguf_writer.add_value_length(head_dim)

        self.gguf_writer.add_file_type(self.ftype)
        logger.info(f"gguf: file type = {self.ftype}")

    def write_vocab(self):
        if len(self.gguf_writer.tensors) != 1:
            raise ValueError('Splitting the vocabulary is not supported')

        self.prepare_metadata(vocab_only=True)
        self.gguf_writer.write_header_to_file(path=self.fname_out)
        self.gguf_writer.write_kv_data_to_file()
        self.gguf_writer.close()

    def does_token_look_special(self, token: str | bytes) -> bool:
        if isinstance(token, (bytes, bytearray)):
            token_text = token.decode(encoding="utf-8")
        elif isinstance(token, memoryview):
            token_text = token.tobytes().decode(encoding="utf-8")
        else:
            token_text = token

        # Some models mark some added tokens which ought to be control tokens as not special.
        # (e.g. command-r, command-r-plus, deepseek-coder, gemma{,-2})
        seems_special = token_text in (
            "<pad>",  # deepseek-coder
            "<mask>", "<2mass>", "[@BOS@]",  # gemma{,-2}
        )

        seems_special = seems_special or (token_text.startswith("<|") and token_text.endswith("|>"))
        seems_special = seems_special or (token_text.startswith("<｜") and token_text.endswith("｜>"))  # deepseek-coder

        # TODO: should these be marked as UNUSED instead? (maybe not)
        seems_special = seems_special or (token_text.startswith("<unused") and token_text.endswith(">"))  # gemma{,-2}

        return seems_special
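
    # Illustrative examples for the heuristic above: "<|im_start|>" and
    # "<unused0>" would be flagged as control-looking tokens, while an
    # ordinary added word like "myword" would not.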

    # used for GPT-2 BPE and WordPiece vocabs
    def get_vocab_base(self) -> tuple[list[str], list[int], str]:
        tokens: list[str] = []
        toktypes: list[int] = []

        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(self.dir_model)
        vocab_size = self.hparams.get("vocab_size", len(tokenizer.vocab))
        assert max(tokenizer.vocab.values()) < vocab_size

        tokpre = self.get_vocab_base_pre(tokenizer)

        reverse_vocab = {id_: encoded_tok for encoded_tok, id_ in tokenizer.vocab.items()}
        added_vocab = tokenizer.get_added_vocab()

        added_tokens_decoder = tokenizer.added_tokens_decoder

        for i in range(vocab_size):
            if i not in reverse_vocab:
                tokens.append(f"[PAD{i}]")
                toktypes.append(gguf.TokenType.UNUSED)
            else:
                token: str = reverse_vocab[i]
                if token in added_vocab:
                    # The tokenizer in llama.cpp assumes the CONTROL and USER_DEFINED tokens are pre-normalized.
                    # To avoid unexpected issues - we make sure to normalize non-normalized tokens
                    if not added_tokens_decoder[i].normalized:
                        previous_token = token
                        token = tokenizer.decode(tokenizer.encode(token, add_special_tokens=False))
                        if previous_token != token:
                            logger.info(f"{repr(previous_token)} is encoded and decoded back to {repr(token)} using AutoTokenizer")

                    if added_tokens_decoder[i].special or self.does_token_look_special(token):
                        toktypes.append(gguf.TokenType.CONTROL)
                    else:
                        # NOTE: this was added for Gemma.
                        # Encoding and decoding the tokens above isn't sufficient for this case.
                        token = token.replace(b"\xe2\x96\x81".decode("utf-8"), " ")  # pre-normalize user-defined spaces
                        toktypes.append(gguf.TokenType.USER_DEFINED)
                else:
                    toktypes.append(gguf.TokenType.NORMAL)
                tokens.append(token)

        return tokens, toktypes, tokpre

    # NOTE: this function is generated by convert_hf_to_gguf_update.py
    #       do not modify it manually!
    # ref:  https://github.com/ggml-org/llama.cpp/pull/6920
    # Marker: Start get_vocab_base_pre
    def get_vocab_base_pre(self, tokenizer) -> str:
        # encoding this string and hashing the resulting tokens would (hopefully) give us a unique identifier that
        # is specific for the BPE pre-tokenizer used by the model
        # we will use this unique identifier to write a "tokenizer.ggml.pre" entry in the GGUF file which we can
        # use in llama.cpp to implement the same pre-tokenizer

        chktxt = '\n \n\n \n\n\n \t \t\t \t\n  \n   \n    \n     \n🚀 (normal) 😶\u200d🌫️ (multiple emojis concatenated) ✅ 🦙🦙 3 33 333 3333 33333 333333 3333333 33333333 3.3 3..3 3...3 កាន់តែពិសេសអាច😁 ?我想在apple工作1314151天～ ------======= нещо на Български \'\'\'\'\'\'```````""""......!!!!!!?????? I\'ve been \'told he\'s there, \'RE you sure? \'M not sure I\'ll make it, \'D you like some tea? We\'Ve a\'lL'

        chktok = tokenizer.encode(chktxt)
        chkhsh = sha256(str(chktok).encode()).hexdigest()

        logger.debug(f"chktok: {chktok}")
        logger.debug(f"chkhsh: {chkhsh}")

        res = None

        # NOTE: if you get an error here, you need to update the convert_hf_to_gguf_update.py script
        #       or pull the latest version of the model from Huggingface
        #       don't edit the hashes manually!
        if chkhsh == "b6e8e1518dc4305be2fe39c313ed643381c4da5db34a98f6a04c093f8afbe99b":
            # ref: https://huggingface.co/THUDM/glm-4-9b-chat
            res = "chatglm-bpe"
        if chkhsh == "81d72c7348a9f0ebe86f23298d37debe0a5e71149e29bd283904c02262b27516":
            # ref: https://huggingface.co/THUDM/glm-4-9b-chat
            res = "chatglm-bpe"
        if chkhsh == "a1336059768a55c99a734006ffb02203cd450fed003e9a71886c88acf24fdbc2":
            # ref: https://huggingface.co/THUDM/glm-4-9b-hf
            res = "glm4"
        if chkhsh == "9ca2dd618e8afaf09731a7cf6e2105b373ba6a1821559f258b272fe83e6eb902":
            # ref: https://huggingface.co/zai-org/GLM-4.5-Air
            res = "glm4"
        if chkhsh == "1431a23e583c97432bc230bff598d103ddb5a1f89960c8f1d1051aaa944d0b35":
            # ref: https://huggingface.co/sapienzanlp/Minerva-7B-base-v1.0
            res = "minerva-7b"
        if chkhsh == "7e57df22b1fe23a7b1e1c7f3dc4e3f96d43a4eb0836d0c6bdc3436d7b2f1c664":
            # ref: https://huggingface.co/tencent/Hunyuan-A13B-Instruct
            res = "hunyuan"
        if chkhsh == "bba3b3366b646dbdded5dbc42d59598b849371afc42f7beafa914afaa5b70aa6":
            # ref: https://huggingface.co/tencent/Hunyuan-4B-Instruct
            res = "hunyuan-dense"
        if chkhsh == "a6b57017d60e6edb4d88ecc2845188e0eb333a70357e45dcc9b53964a73bbae6":
            # ref: https://huggingface.co/tiiuae/Falcon-H1-0.5B-Base
            res = "falcon-h1"
        if chkhsh == "60476e1243776c4fb1b993dbd7a5f15ac22f83c80afdf425fa5ae01c8d44ef86":
            # ref: https://huggingface.co/tiiuae/Falcon-H1-1B-Base
            res = "falcon-h1"
        if chkhsh == "3eda48b4c4dc7de733d1a8b3e3b4a85243dbbf704da2ee9d42c6beced8897896":
            # ref: https://huggingface.co/tiiuae/Falcon-H1-7B-Base
            res = "falcon-h1"
        if chkhsh == "48f8e02c0359c0bbdd82f26909171fac1c18a457bb47573ed1fe3bbb2c1cfd4b":
            # ref: https://huggingface.co/tiiuae/Falcon-H1-34B-Base
            res = "falcon-h1"
        if chkhsh == "81212dc7cdb7e0c1074ca62c5aeab0d43c9f52b8a737be7b12a777c953027890":
            # ref: https://huggingface.co/moonshotai/Kimi-K2-Base
            res = "kimi-k2"
        if chkhsh == "d4540891389ea895b53b399da6ac824becc30f2fba0e9ddbb98f92e55ca0e97c":
            # ref: https://huggingface.co/Qwen/Qwen3-Embedding-0.6B
            res = "qwen2"
        if chkhsh == "66b8d4e19ab16c3bfd89bce5d785fb7e0155e8648708a1f42077cb9fe002c273":
            # ref: https://huggingface.co/alvarobartt/grok-2-tokenizer
            res = "grok-2"
        if chkhsh == "0ef9807a4087ebef797fc749390439009c3b9eda9ad1a097abbe738f486c01e5":
            # ref: https://huggingface.co/meta-llama/Meta-Llama-3-8B
            res = "llama-bpe"
        if chkhsh == "049ecf7629871e3041641907f3de7c733e4dbfdc736f57d882ba0b0845599754":
            # ref: https://huggingface.co/deepseek-ai/deepseek-llm-7b-base
            res = "deepseek-llm"
        if chkhsh == "347715f544604f9118bb75ed199f68779f423cabb20db6de6f31b908d04d7821":
            # ref: https://huggingface.co/deepseek-ai/deepseek-coder-6.7b-base
            res = "deepseek-coder"
        if chkhsh == "8aeee3860c56296a157a1fe2fad249ec40aa59b1bb5709f4ade11c4e6fe652ed":
            # ref: https://huggingface.co/tiiuae/falcon-7b
            res = "falcon"
        if chkhsh == "0876d13b50744004aa9aeae05e7b0647eac9d801b5ba4668afc01e709c15e19f":
            # ref: https://huggingface.co/BAAI/bge-small-en-v1.5
            res = "bert-bge"
        if chkhsh == "9d032fcbd5501f4a38150912590928bfb36091efb5df11b8e2124b0390e3fb1e":
            # ref: https://huggingface.co/tiiuae/Falcon3-7B-Base
            res = "falcon3"
        if chkhsh == "8e62295832751ca1e8f92f2226f403dea30dc5165e448b5bfa05af5340c64ec7":
            # ref: https://huggingface.co/BAAI/bge-large-zh-v1.5
            res = "bert-bge-large"
        if chkhsh == "b6dc8df998e1cfbdc4eac8243701a65afe638679230920b50d6f17d81c098166":
            # ref: https://huggingface.co/mosaicml/mpt-7b
            res = "mpt"
        if chkhsh == "35d91631860c815f952d711435f48d356ebac988362536bed955d43bfa436e34":
            # ref: https://huggingface.co/bigcode/starcoder2-3b
            res = "starcoder"
        if chkhsh == "3ce83efda5659b07b1ad37ca97ca5797ea4285d9b9ab0dc679e4a720c9da7454":
            # ref: https://huggingface.co/openai-community/gpt2
            res = "gpt-2"
        if chkhsh == "32d85c31273f8019248f2559fed492d929ea28b17e51d81d3bb36fff23ca72b3":
            # ref: https://huggingface.co/stabilityai/stablelm-2-zephyr-1_6b
            res = "stablelm2"
        if chkhsh == "6221ad2852e85ce96f791f476e0b390cf9b474c9e3d1362f53a24a06dc8220ff":
            # ref: https://huggingface.co/smallcloudai/Refact-1_6-base
            res = "refact"
        if chkhsh == "9c2227e4dd922002fb81bde4fc02b0483ca4f12911410dee2255e4987644e3f8":
            # ref: https://huggingface.co/CohereForAI/c4ai-command-r-v01
            res = "command-r"
        if chkhsh == "e636dc30a262dcc0d8c323492e32ae2b70728f4df7dfe9737d9f920a282b8aea":
            # ref: https://huggingface.co/Qwen/Qwen1.5-7B
            res = "qwen2"
        if chkhsh == "b6dc8df998e1cfbdc4eac8243701a65afe638679230920b50d6f17d81c098166":
            # ref: https://huggingface.co/allenai/OLMo-1.7-7B-hf
            res = "olmo"
        if chkhsh == "a8594e3edff7c29c003940395316294b2c623e09894deebbc65f33f1515df79e":
            # ref: https://huggingface.co/databricks/dbrx-base
            res = "dbrx"
        if chkhsh == "c7699093ba4255a91e702aa38a596aa81669f3525dae06c2953267dde580f448":
            # ref: https://huggingface.co/jinaai/jina-reranker-v1-tiny-en
            res = "jina-v1-en"
        if chkhsh == "0876d13b50744004aa9aeae05e7b0647eac9d801b5ba4668afc01e709c15e19f":
            # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-en
            res = "jina-v2-en"
        if chkhsh == "171aeeedd6fb548d418a7461d053f11b6f1f1fc9b387bd66640d28a4b9f5c643":
            # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-es
            res = "jina-v2-es"
        if chkhsh == "27949a2493fc4a9f53f5b9b029c82689cfbe5d3a1929bb25e043089e28466de6":
            # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-de
            res = "jina-v2-de"
        if chkhsh == "c136ed14d01c2745d4f60a9596ae66800e2b61fa45643e72436041855ad4089d":
            # ref: https://huggingface.co/abacusai/Smaug-Llama-3-70B-Instruct
            res = "smaug-bpe"
        if chkhsh == "c7ea5862a53e4272c035c8238367063e2b270d51faa48c0f09e9d5b54746c360":
            # ref: https://huggingface.co/LumiOpen/Poro-34B-chat
            res = "poro-chat"
        if chkhsh == "7967bfa498ade6b757b064f31e964dddbb80f8f9a4d68d4ba7998fcf281c531a":
            # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-code
            res = "jina-v2-code"
        if chkhsh == "7fc505bd3104ca1083b150b17d088b59534ede9bde81f0dd2090967d7fe52cee":
            # ref: https://huggingface.co/LumiOpen/Viking-7B
            res = "viking"
        if chkhsh == "b53802fb28e26d645c3a310b34bfe07da813026ec7c7716883404d5e0f8b1901":
            # ref: https://huggingface.co/core42/jais-13b
            res = "jais"
        if chkhsh == "7b3e7548e4308f52a76e8229e4e6cc831195d0d1df43aed21ac6c93da05fec5f":
            # ref: https://huggingface.co/WisdomShell/CodeShell-7B
            res = "codeshell"
        if chkhsh == "63b97e4253352e6f357cc59ea5b583e3a680eaeaf2632188c2b952de2588485e":
            # ref: https://huggingface.co/mistralai/Mistral-Nemo-Base-2407
            res = "tekken"
        if chkhsh == "855059429035d75a914d1eda9f10a876752e281a054a7a3d421ef0533e5b6249":
            # ref: https://huggingface.co/HuggingFaceTB/SmolLM-135M
            res = "smollm"
        if chkhsh == "3c30d3ad1d6b64202cd222813e7736c2db6e1bd6d67197090fc1211fbc612ae7":
            # ref: https://huggingface.co/bigscience/bloom
            res = "bloom"
        if chkhsh == "bc01ce58980e1db43859146dc51b1758b3b88729b217a74792e9f8d43e479d21":
            # ref: https://huggingface.co/TurkuNLP/gpt3-finnish-small
            res = "gpt3-finnish"
        if chkhsh == "4e2b24cc4770243d65a2c9ec19770a72f08cffc161adbb73fcbb6b7dd45a0aae":
            # ref: https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct
            res = "exaone"
        if chkhsh == "fcace8b9cac38ce847670c970cd5892031a753a1ef381abd1d9af00f713da085":
            # ref: https://huggingface.co/microsoft/phi-2
            res = "phi-2"
        if chkhsh == "60824e3c0d9401f89943cbb2fff727f0e2d4c545ba4df2d6e4f09a6db0f5b450":
            # ref: https://huggingface.co/facebook/chameleon-7b
            res = "chameleon"
        if chkhsh == "8b5a93ed704057481f240da0be7e7dca721d7f8f4755263b6807227a2cbeae65":
            # ref: https://huggingface.co/sentence-transformers/stsb-roberta-base
            res = "roberta-bpe"
        if chkhsh == "ad851be1dba641f2e3711822f816db2c265f788b37c63b4e1aeacb9ee92de8eb":
            # ref: https://huggingface.co/ai-sage/GigaChat-20B-A3B-instruct
            res = "gigachat"
        if chkhsh == "d4c8f286ea6b520b3d495c4455483cfa2302c0cfcd4be05d781b6a8a0a7cdaf1":
            # ref: https://huggingface.co/Infinigence/Megrez-3B-Instruct
            res = "megrez"
        if chkhsh == "877081d19cf6996e2c4ff0e1236341e9b7bde288f5311a56a937f0afbbb3aeb5":
            # ref: https://huggingface.co/deepseek-ai/DeepSeek-V3
            res = "deepseek-v3"
        if chkhsh == "b3f499bb4255f8ca19fccd664443283318f2fd2414d5e0b040fbdd0cc195d6c5":
            # ref: https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B
            res = "deepseek-r1-qwen"
        if chkhsh == "ccc2ef013c104be7bae2965776d611e1d7a8a2a9c547dd93a682c9a9fc80352e":
            # ref: https://huggingface.co/Xenova/gpt-4o
            res = "gpt-4o"
        if chkhsh == "7dec86086fcc38b66b7bc1575a160ae21cf705be7718b9d5598190d7c12db76f":
            # ref: https://huggingface.co/UW/OLMo2-8B-SuperBPE-t180k
            res = "superbpe"
        if chkhsh == "1994ffd01900cfb37395608534236ecd63f2bd5995d6cb1004dda1af50240f15":
            # ref: https://huggingface.co/trillionlabs/Trillion-7B-preview
            res = "trillion"
        if chkhsh == "96a5f08be6259352137b512d4157e333e21df7edd3fcd152990608735a65b224":
            # ref: https://huggingface.co/inclusionAI/Ling-lite
            res = "bailingmoe"
        if chkhsh == "d353350c764d8c3b39c763113960e4fb4919bea5fbf208a0e3b22e8469dc7406":
            # ref: https://huggingface.co/meta-llama/Llama-4-Scout-17B-16E-Instruct
            res = "llama4"
        if chkhsh == "0e9433cbbb161f89e264eb32e8e64bfe69e834973ffca5d41d3948a604a3e2a3":
            # ref: https://huggingface.co/mistral-community/pixtral-12b
            res = "pixtral"
        if chkhsh == "d5f1dd6f980fec569fb218a81a7658ac45fc56b38c5a0adeb1c232fbe04ef5ec":
            # ref: https://huggingface.co/ByteDance-Seed/Seed-Coder-8B-Base
            res = "seed-coder"
        if chkhsh == "b0a6b1c0bd5998ebd9df08611efde34a4ff03faed45ae09c43e6b31ebd4b94cf":
            # ref: https://huggingface.co/skt/A.X-4.0
            res = "a.x-4.0"
        if chkhsh == "f6791d196f87ce6b56a7d234be618e0d58f8cda3549416635b2bebcd22cd95c4":
            # ref: https://huggingface.co/K-intelligence/Midm-2.0-Base-Instruct
            res = "midm-2.0"
        if chkhsh == "169bf0296a13c4d9b7672313f749eb36501d931022de052aad6e36f2bf34dd51":
            # ref: https://huggingface.co/LiquidAI/LFM2-Tokenizer
            res = "lfm2"
        if chkhsh == "2085e1638f6c377a0aa4ead21b27bb4cb941bf800df86ed391011769c1758dfb":
            # ref: https://huggingface.co/LGAI-EXAONE/EXAONE-4.0-32B
            res = "exaone4"
        if chkhsh == "a1e163ecab2e718a4c829d1148b6e86824ec36163bb71941c3dca9cd5ac25756":
            # ref: https://huggingface.co/JetBrains/Mellum-4b-base
            res = "mellum"
        if chkhsh == "9b1be57e70d20d9501b2b3186e792d81181ae36ada3903c26f9fea418cf87206":
            # ref: https://huggingface.co/inclusionAI/LLaDA-MoE-7B-A1B-Base
            res = "llada-moe"

        if res is None:
            logger.warning("\n")
            logger.warning("**************************************************************************************")
            logger.warning("** WARNING: The BPE pre-tokenizer was not recognized!")
            logger.warning("**          There are 2 possible reasons for this:")
            logger.warning("**          - the model has not been added to convert_hf_to_gguf_update.py yet")
            logger.warning("**          - the pre-tokenization config has changed upstream")
            logger.warning("**          Check your model files and convert_hf_to_gguf_update.py and update them accordingly.")
            logger.warning("** ref:     https://github.com/ggml-org/llama.cpp/pull/6920")
            logger.warning("**")
            logger.warning(f"** chkhsh:  {chkhsh}")
            logger.warning("**************************************************************************************")
            logger.warning("\n")
            raise NotImplementedError("BPE pre-tokenizer was not recognized - update get_vocab_base_pre()")

        logger.debug(f"tokenizer.ggml.pre: {repr(res)}")
        logger.debug(f"chkhsh: {chkhsh}")

        return res
    # Marker: End get_vocab_base_pre
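
    # Illustrative sketch: the chkhsh for a new tokenizer can be reproduced
    # standalone with the same two steps as above (the repo id is hypothetical):
    #
    #   from hashlib import sha256
    #   from transformers import AutoTokenizer
    #   tok = AutoTokenizer.from_pretrained("org/new-model")
    #   print(sha256(str(tok.encode(chktxt)).encode()).hexdigest())
    #
    # convert_hf_to_gguf_update.py automates this and regenerates the if-chain
    # above; the hashes must never be edited by hand.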

    def _set_vocab_none(self) -> None:
        self.gguf_writer.add_tokenizer_model("none")

    def _set_vocab_gpt2(self) -> None:
        tokens, toktypes, tokpre = self.get_vocab_base()
        self.gguf_writer.add_tokenizer_model("gpt2")
        self.gguf_writer.add_tokenizer_pre(tokpre)
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=True)
        special_vocab.add_to_gguf(self.gguf_writer)

    def _set_vocab_qwen(self):
        dir_model = self.dir_model
        hparams = self.hparams
        tokens: list[str] = []
        toktypes: list[int] = []

        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(dir_model, trust_remote_code=True)
        vocab_size = hparams["vocab_size"]
        assert max(tokenizer.get_vocab().values()) < vocab_size

        tokpre = self.get_vocab_base_pre(tokenizer)

        merges = []
        vocab = {}
        mergeable_ranks = tokenizer.mergeable_ranks
        for token, rank in mergeable_ranks.items():
            vocab[QwenModel.token_bytes_to_string(token)] = rank
            if len(token) == 1:
                continue
            merged = QwenModel.bpe(mergeable_ranks, token, max_rank=rank)
            assert len(merged) == 2
            merges.append(' '.join(map(QwenModel.token_bytes_to_string, merged)))

        # for this kind of tokenizer, added_vocab is not a subset of vocab, so they need to be combined
        added_vocab = tokenizer.special_tokens
        reverse_vocab = {id_ : encoded_tok for encoded_tok, id_ in {**vocab, **added_vocab}.items()}

        for i in range(vocab_size):
            if i not in reverse_vocab:
                tokens.append(f"[PAD{i}]")
                toktypes.append(gguf.TokenType.UNUSED)
            elif reverse_vocab[i] in added_vocab:
                tokens.append(reverse_vocab[i])
                toktypes.append(gguf.TokenType.CONTROL)
            else:
                tokens.append(reverse_vocab[i])
                toktypes.append(gguf.TokenType.NORMAL)

        self.gguf_writer.add_tokenizer_model("gpt2")
        self.gguf_writer.add_tokenizer_pre(tokpre)
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(dir_model, load_merges=False)
        special_vocab.merges = merges
        # only add special tokens when they were not already loaded from config.json
        if len(special_vocab.special_token_ids) == 0:
            special_vocab._set_special_token("bos", tokenizer.special_tokens["<|endoftext|>"])
            special_vocab._set_special_token("eos", tokenizer.special_tokens["<|endoftext|>"])
        # this one is usually not in config.json anyway
        special_vocab._set_special_token("unk", tokenizer.special_tokens["<|endoftext|>"])
        special_vocab.add_to_gguf(self.gguf_writer)

    def _set_vocab_sentencepiece(self, add_to_gguf=True):
        tokens, scores, toktypes = self._create_vocab_sentencepiece()

        self.gguf_writer.add_tokenizer_model("llama")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

    def _create_vocab_sentencepiece(self):
        from sentencepiece import SentencePieceProcessor

        tokenizer_path = self.dir_model / 'tokenizer.model'

        if not tokenizer_path.is_file():
            raise FileNotFoundError(f"File not found: {tokenizer_path}")

        tokenizer = SentencePieceProcessor()
        tokenizer.LoadFromFile(str(tokenizer_path))

        vocab_size = self.find_hparam([
            "vocab_size_per_layer_input",  # gemma3n
            "vocab_size",
        ], optional=True) or tokenizer.vocab_size()

        tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
        scores: list[float] = [-10000.0] * vocab_size
        toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size

        for token_id in range(tokenizer.vocab_size()):
            if token_id >= vocab_size:
                logger.warning(f'ignore tokens from {token_id}: id is out of range, max={vocab_size - 1}')
                break

            piece = tokenizer.IdToPiece(token_id)
            text = piece.encode("utf-8")
            score = tokenizer.GetScore(token_id)

            toktype = SentencePieceTokenTypes.NORMAL
            if tokenizer.IsUnknown(token_id):
                toktype = SentencePieceTokenTypes.UNKNOWN
            elif tokenizer.IsControl(token_id):
                toktype = SentencePieceTokenTypes.CONTROL
            elif tokenizer.IsUnused(token_id):
                toktype = SentencePieceTokenTypes.UNUSED
            elif tokenizer.IsByte(token_id):
                toktype = SentencePieceTokenTypes.BYTE

            tokens[token_id] = text
            scores[token_id] = score
            toktypes[token_id] = toktype

        added_tokens_file = self.dir_model / 'added_tokens.json'
        if added_tokens_file.is_file():
            with open(added_tokens_file, "r", encoding="utf-8") as f:
                added_tokens_json = json.load(f)
                for key in added_tokens_json:
                    token_id = added_tokens_json[key]
                    if token_id >= vocab_size:
                        logger.warning(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
                        continue

                    tokens[token_id] = key.encode("utf-8")
                    scores[token_id] = -1000.0
                    toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED

        tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
        if tokenizer_config_file.is_file():
            with open(tokenizer_config_file, "r", encoding="utf-8") as f:
                tokenizer_config_json = json.load(f)
                added_tokens_decoder = tokenizer_config_json.get("added_tokens_decoder", {})
                for token_id, token_data in added_tokens_decoder.items():
                    token_id = int(token_id)
                    token: str = token_data["content"]
                    if token_id >= vocab_size:
                        logger.warning(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
                        continue

                    if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
                        if tokens[token_id] != token.encode("utf-8"):
                            logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token!r}')

                    if token_data.get("special") or self.does_token_look_special(token):
                        toktypes[token_id] = SentencePieceTokenTypes.CONTROL
                    else:
                        token = token.replace(b"\xe2\x96\x81".decode("utf-8"), " ")  # pre-normalize user-defined spaces
                        toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED

                    scores[token_id] = -1000.0
                    tokens[token_id] = token.encode("utf-8")

        if vocab_size > len(tokens):
            pad_count = vocab_size - len(tokens)
            logger.debug(f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]")
            for i in range(1, pad_count + 1):
                tokens.append(bytes(f"[PAD{i}]", encoding="utf-8"))
                scores.append(-1000.0)
                toktypes.append(SentencePieceTokenTypes.UNUSED)

        return tokens, scores, toktypes

    def _set_vocab_llama_hf(self):
        vocab = gguf.LlamaHfVocab(self.dir_model)
        tokens = []
        scores = []
        toktypes = []

        for text, score, toktype in vocab.all_tokens():
            tokens.append(text)
            scores.append(score)
            toktypes.append(toktype)

        assert len(tokens) == vocab.vocab_size

        self.gguf_writer.add_tokenizer_model("llama")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

    def _set_vocab_rwkv_world(self):
        assert (self.dir_model / "rwkv_vocab_v20230424.txt").is_file()
        vocab_size = self.hparams.get("vocab_size", 65536)

        tokens: list[bytes] = ['<s>'.encode("utf-8")]
        toktypes: list[int] = [gguf.TokenType.CONTROL]

        with open(self.dir_model / "rwkv_vocab_v20230424.txt", "r", encoding="utf-8") as f:
            lines = f.readlines()
            for line in lines:
                parts = line.split(' ')
                assert len(parts) >= 3
                token, token_len = ast.literal_eval(' '.join(parts[1:-1])), int(parts[-1])
                token = token.encode("utf-8") if isinstance(token, str) else token
                assert isinstance(token, bytes)
                assert len(token) == token_len
                token_text: str = repr(token)[2:-1]  # "b'\xff'" -> "\xff"
                tokens.append(token_text.encode("utf-8"))
                toktypes.append(gguf.TokenType.NORMAL)
        remainder = vocab_size - len(tokens)
        assert remainder >= 0
        for i in range(len(tokens), vocab_size):
            tokens.append(f"[PAD{i}]".encode("utf-8"))
            toktypes.append(gguf.TokenType.UNUSED)

        self.gguf_writer.add_tokenizer_model("rwkv")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)
        special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=False)
        if special_vocab.chat_template is None:
            template_path = Path(__file__).parent / "models" / "templates" / "llama-cpp-rwkv-world.jinja"
            if template_path.is_file():
                with open(template_path, "r", encoding="utf-8") as f:
                    template = f.read()
            else:
                template = "rwkv-world"
            special_vocab.chat_template = template
        # hack: Add '\n\n' as the EOT token to make it chat normally
        special_vocab._set_special_token("eot", 261)
        # hack: Override these as they have already been set (incorrectly)
        special_vocab.special_token_ids["bos"] = 0
        special_vocab.special_token_ids["eos"] = 0

        special_vocab.add_to_gguf(self.gguf_writer)
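
    # Note on the file format parsed above (derived from the parsing logic,
    # shown here for illustration): each line of rwkv_vocab_v20230424.txt is
    #   <token_id> <token as a Python str/bytes literal> <token byte length>
    # e.g. a line such as:  261 '\n\n' 2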

    def _set_vocab_builtin(self, model_name: Literal["gpt-neox", "llama-spm"], vocab_size: int):
        tokenizer_path = Path(sys.path[0]) / "models" / f"ggml-vocab-{model_name}.gguf"
        logger.warning(f"Using tokenizer from '{os.path.relpath(tokenizer_path, os.getcwd())}'")
        vocab_reader = gguf.GGUFReader(tokenizer_path, "r")

        default_pre = "mpt" if model_name == "gpt-neox" else "default"

        field = vocab_reader.get_field(gguf.Keys.Tokenizer.MODEL)
        assert field  # tokenizer model
        self.gguf_writer.add_tokenizer_model(bytes(field.parts[-1]).decode("utf-8"))

        field = vocab_reader.get_field(gguf.Keys.Tokenizer.PRE)
        self.gguf_writer.add_tokenizer_pre(bytes(field.parts[-1]).decode("utf-8") if field else default_pre)

        field = vocab_reader.get_field(gguf.Keys.Tokenizer.LIST)
        assert field  # token list
        self.gguf_writer.add_token_list([bytes(field.parts[i]) for i in field.data][:vocab_size])

        if model_name == "llama-spm":
            field = vocab_reader.get_field(gguf.Keys.Tokenizer.SCORES)
            assert field  # token scores
            self.gguf_writer.add_token_scores([field.parts[i].tolist()[0] for i in field.data][:vocab_size])

        field = vocab_reader.get_field(gguf.Keys.Tokenizer.TOKEN_TYPE)
        assert field  # token types
        self.gguf_writer.add_token_types([field.parts[i].tolist()[0] for i in field.data][:vocab_size])

        if model_name != "llama-spm":
            field = vocab_reader.get_field(gguf.Keys.Tokenizer.MERGES)
            assert field  # token merges
            self.gguf_writer.add_token_merges([bytes(field.parts[i]) for i in field.data])

        if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.BOS_ID)) is not None:
            self.gguf_writer.add_bos_token_id(field.parts[-1].tolist()[0])
        if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.EOS_ID)) is not None:
            self.gguf_writer.add_eos_token_id(field.parts[-1].tolist()[0])
        if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.UNK_ID)) is not None:
            self.gguf_writer.add_unk_token_id(field.parts[-1].tolist()[0])
        if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.PAD_ID)) is not None:
            self.gguf_writer.add_pad_token_id(field.parts[-1].tolist()[0])
        if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.ADD_BOS)) is not None:
            self.gguf_writer.add_add_bos_token(field.parts[-1].tolist()[0])
        if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.ADD_EOS)) is not None:
            self.gguf_writer.add_add_eos_token(field.parts[-1].tolist()[0])

    def _try_set_pooling_type(self) -> None:
        # get pooling path
        pooling_path = None
        module_path = self.dir_model / "modules.json"
        if module_path.is_file():
            with open(module_path, encoding="utf-8") as f:
                modules = json.load(f)
            for mod in modules:
                if mod["type"] == "sentence_transformers.models.Pooling":
                    pooling_path = mod["path"]
                    break

        # get pooling type
        if pooling_path is not None:
            with open(self.dir_model / pooling_path / "config.json", encoding="utf-8") as f:
                pooling = json.load(f)
            if pooling["pooling_mode_mean_tokens"]:
                pooling_type = gguf.PoolingType.MEAN
            elif pooling["pooling_mode_cls_token"]:
                pooling_type = gguf.PoolingType.CLS
            elif pooling["pooling_mode_lasttoken"]:
                pooling_type = gguf.PoolingType.LAST
            else:
                raise NotImplementedError("Only MEAN, CLS, and LAST pooling types supported")
            self.gguf_writer.add_pooling_type(pooling_type)
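
    # For illustration, a typical sentence-transformers modules.json entry that
    # the lookup above matches (field values are representative, not guaranteed):
    #   {"idx": 1, "name": "1", "path": "1_Pooling",
    #    "type": "sentence_transformers.models.Pooling"}
    # and <path>/config.json then carries boolean flags such as
    # "pooling_mode_mean_tokens", "pooling_mode_cls_token" and "pooling_mode_lasttoken".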

    def _set_vocab_interns1(self):
        tokens: list[str] = []
        toktypes: list[int] = []

        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(self.dir_model, trust_remote_code=True)
        vocab = getattr(tokenizer, 'vocab', tokenizer.get_vocab())
        vocab_size = self.hparams.get("vocab_size", len(vocab))
        assert max(vocab.values()) < vocab_size

        tokpre = self.get_vocab_base_pre(tokenizer)

        reverse_vocab = {id_: encoded_tok for encoded_tok, id_ in vocab.items()}
        added_vocab = tokenizer.get_added_vocab()

        added_tokens_decoder = tokenizer.added_tokens_decoder

        for i in range(vocab_size):
            if i not in reverse_vocab:
                tokens.append(f"[PAD{i}]")
                toktypes.append(gguf.TokenType.UNUSED)
            else:
                token: str = reverse_vocab[i]
                if token in added_vocab:
                    # The tokenizer in llama.cpp assumes the CONTROL and USER_DEFINED tokens are pre-normalized.
                    # To avoid unexpected issues - we make sure to normalize non-normalized tokens
                    if not added_tokens_decoder[i].normalized:
                        previous_token = token
                        token = tokenizer.decode(tokenizer.encode(token, add_special_tokens=False))
                        if previous_token != token:
                            logger.info(f"{repr(previous_token)} is encoded and decoded back to {repr(token)} using AutoTokenizer")

                    if added_tokens_decoder[i].special or self.does_token_look_special(token):
                        toktypes.append(gguf.TokenType.CONTROL)
                    else:
                        toktypes.append(gguf.TokenType.USER_DEFINED)
                else:
                    toktypes.append(gguf.TokenType.NORMAL)
                tokens.append(token)

        self.gguf_writer.add_tokenizer_model("gpt2")
        self.gguf_writer.add_tokenizer_pre(tokpre)
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=True)
        special_vocab._set_special_token("bos", 151643)
        special_vocab.add_to_gguf(self.gguf_writer)


class MmprojModel(ModelBase):
    model_type = ModelType.MMPROJ
    model_arch = gguf.MODEL_ARCH.MMPROJ
    preprocessor_config: dict[str, Any]
    global_config: dict[str, Any]

    n_block_keys = ["n_layers", "num_hidden_layers", "n_layer", "num_layers", "depth"]

    has_vision_encoder: bool = True  # by default
    has_audio_encoder: bool = False

    # for models having multiple encoders, we need to separate their hparams
    hparams_vision: dict[str, Any] | None = None
    hparams_audio: dict[str, Any] | None = None

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.model_arch != gguf.MODEL_ARCH.MMPROJ:
            raise TypeError("MmprojModel must be subclassed with model_arch = gguf.MODEL_ARCH.MMPROJ")

        # get n_embd of the text model
        if not self.is_mistral_format:
            if "text_config" not in self.hparams:
                self.hparams["text_config"] = {}
            if "audio_config" not in self.hparams:
                self.hparams["audio_config"] = {}
            text_config = {**self.hparams, **self.hparams["text_config"]}
            self.n_embd_text = text_config.get("hidden_size", text_config.get("n_embd", 0))
        else:
            text_config = {
                k: v for k, v in self.hparams.items() if k not in ["vision_encoder", "audio_encoder"]
            }
            self.n_embd_text = text_config.get("hidden_dim", 0)

        assert self.n_embd_text > 0, "n_embd not found in hparams"

        # move vision config to the top level, while preserving the original hparams in global_config
        import copy
        self.global_config = copy.deepcopy(self.hparams)
        self.hparams_vision = self.get_vision_config()
        self.hparams_audio = self.get_audio_config()

        if self.hparams_vision is None and self.hparams_audio is None:
            raise ValueError("vision_config / audio_config not found in hparams")

        # for compat with vision-only models
        self.hparams = self.hparams_vision or self.hparams_audio or self.hparams

        # TODO @ngxson : this is a hack to support both vision and audio encoders
        have_multiple_encoders = self.has_audio_encoder and self.has_vision_encoder
        self.block_count = 128 if have_multiple_encoders else self.find_hparam(self.n_block_keys, True)
        self.tensor_map = gguf.get_tensor_name_map(gguf.MODEL_ARCH.MMPROJ, self.block_count)

        # load preprocessor config
        if not self.is_mistral_format:
            with open(self.dir_model / "preprocessor_config.json", "r", encoding="utf-8") as f:
                self.preprocessor_config = json.load(f)
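
    # For illustration (assumed typical HF layout, not guaranteed for every
    # model): a multimodal config.json usually nests per-modality settings, e.g.
    #   {"text_config": {"hidden_size": 4096, ...},
    #    "vision_config": {"hidden_size": 1152, "depth": 27, ...}}
    # which is why the vision/audio sub-dicts are hoisted to the top level above.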

    def get_vision_config(self) -> dict[str, Any] | None:
        config_name = "vision_config" if not self.is_mistral_format else "vision_encoder"
        return self.global_config.get(config_name)

    def get_audio_config(self) -> dict[str, Any] | None:
        return self.global_config.get("audio_config")

    def set_type(self):
        self.gguf_writer.add_type(gguf.GGUFType.MMPROJ)

    def set_gguf_parameters(self):
        self.gguf_writer.add_file_type(self.ftype)

        if self.has_vision_encoder:
            self.gguf_writer.add_clip_has_vision_encoder(True)
            self.gguf_writer.add_vision_projection_dim(self.n_embd_text)

            # vision config
            self.gguf_writer.add_vision_image_size(self.find_vparam(["image_size"]))
            self.gguf_writer.add_vision_patch_size(self.find_vparam(["patch_size"]))
            self.gguf_writer.add_vision_embedding_length(self.find_vparam(["hidden_size"]))
            self.gguf_writer.add_vision_feed_forward_length(self.find_vparam(["intermediate_size"]))
            self.gguf_writer.add_vision_block_count(self.find_vparam(self.n_block_keys))
            self.gguf_writer.add_vision_head_count(self.find_vparam(["num_attention_heads"]))

            # preprocessor config
            image_mean = DATASET_MEAN if self.is_mistral_format else self.preprocessor_config["image_mean"]
            image_std = DATASET_STD if self.is_mistral_format else self.preprocessor_config["image_std"]
            self.gguf_writer.add_vision_image_mean(image_mean)
            self.gguf_writer.add_vision_image_std(image_std)

        if self.has_audio_encoder:
            self.gguf_writer.add_clip_has_audio_encoder(True)
            self.gguf_writer.add_audio_projection_dim(self.n_embd_text)

            # audio config
            self.gguf_writer.add_audio_embedding_length(self.find_aparam(["hidden_size"]))
            self.gguf_writer.add_audio_feed_forward_length(self.find_aparam(["intermediate_size"]))
            self.gguf_writer.add_audio_block_count(self.find_aparam(self.n_block_keys))
            self.gguf_writer.add_audio_head_count(self.find_aparam(["num_attention_heads"]))

        if not self.has_vision_encoder and not self.has_audio_encoder:
            raise ValueError("MmprojModel must have either vision or audio encoder")

    def write_vocab(self):
        raise ValueError("MmprojModel does not support vocab writing")

    def find_vparam(self, keys: Iterable[str], optional: bool = False) -> Any:
        assert self.hparams_vision is not None
        return self._find_param(self.hparams_vision, keys, optional)

    def find_aparam(self, keys: Iterable[str], optional: bool = False) -> Any:
        assert self.hparams_audio is not None
        return self._find_param(self.hparams_audio, keys, optional)

    def _find_param(self, obj: dict[str, Any], keys: Iterable[str], optional: bool = False) -> Any:
        key = next((k for k in keys if k in obj), None)
        if key is not None:
            return obj[key]
        if optional:
            return None
        raise KeyError(f"could not find any of: {keys}")

    def tensor_force_quant(self, name, new_name, bid, n_dims):
        del bid, name, n_dims  # unused
        if ".patch_embd.weight" in new_name:
            return gguf.GGMLQuantizationType.F16 if self.ftype == gguf.LlamaFileType.MOSTLY_F16 else gguf.GGMLQuantizationType.F32
        return False
  1182. @ModelBase.register("GPTNeoXForCausalLM")
  1183. class GPTNeoXModel(TextModel):
  1184. model_arch = gguf.MODEL_ARCH.GPTNEOX
  1185. def set_gguf_parameters(self):
  1186. block_count = self.hparams["num_hidden_layers"]
  1187. self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"])
  1188. self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
  1189. self.gguf_writer.add_block_count(block_count)
  1190. self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
  1191. self.gguf_writer.add_rope_dimension_count(
  1192. int(self.hparams["rotary_pct"] * (self.hparams["hidden_size"] // self.hparams["num_attention_heads"])),
  1193. )
  1194. self.gguf_writer.add_head_count(self.hparams["num_attention_heads"])
  1195. self.gguf_writer.add_parallel_residual(self.hparams.get("use_parallel_residual", True))
  1196. self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_eps"])
  1197. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  1198. del bid # unused
  1199. n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads"))
  1200. n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed"))
  1201. tensors: list[tuple[str, Tensor]] = []
  1202. if re.match(r"gpt_neox\.layers\.\d+\.attention\.query_key_value\.weight", name):
  1203. # Map bloom-style qkv_linear to gpt-style qkv_linear
  1204. # bloom: https://github.com/huggingface/transformers/blob/main/src/transformers/models/bloom/modeling_bloom.py#L238-L252 # noqa
  1205. # gpt-2: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt2/modeling_gpt2.py#L312 # noqa
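            # Illustrative shapes (hypothetical n_head=2, n_embed=8): the fused
            # tensor of shape (24, 8) is viewed as (2, 3, 4, 8); slicing index
            # 0/1/2 along dim 1 and flattening back to (8, 8) yields contiguous
            # Q, K and V blocks, which are then stacked along dim 0.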
            qkv_weights = data_torch.reshape((n_head, 3, n_embed // n_head, n_embed))
            data_torch = torch.cat(
                (
                    qkv_weights[:, 0, :, :].reshape((-1, n_embed)),
                    qkv_weights[:, 1, :, :].reshape((-1, n_embed)),
                    qkv_weights[:, 2, :, :].reshape((-1, n_embed)),
                ),
                dim=0,
            )
            logger.info("re-format attention.linear_qkv.weight")
        elif re.match(r"gpt_neox\.layers\.\d+\.attention\.query_key_value\.bias", name):
            qkv_bias = data_torch.reshape((n_head, 3, n_embed // n_head))
            data_torch = torch.cat(
                (
                    qkv_bias[:, 0, :].reshape((n_embed,)),
                    qkv_bias[:, 1, :].reshape((n_embed,)),
                    qkv_bias[:, 2, :].reshape((n_embed,)),
                ),
                dim=0,
            )
            logger.info("re-format attention.linear_qkv.bias")

        tensors.append((self.map_tensor_name(name), data_torch))

        return tensors
  1229. @ModelBase.register("BloomForCausalLM", "BloomModel")
  1230. class BloomModel(TextModel):
  1231. model_arch = gguf.MODEL_ARCH.BLOOM
  1232. def set_gguf_parameters(self):
  1233. n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed"))
  1234. n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads"))
  1235. self.gguf_writer.add_context_length(self.hparams.get("seq_length", n_embed))
  1236. self.gguf_writer.add_embedding_length(n_embed)
  1237. self.gguf_writer.add_feed_forward_length(4 * n_embed)
  1238. self.gguf_writer.add_block_count(self.hparams["n_layer"])
  1239. self.gguf_writer.add_head_count(n_head)
  1240. self.gguf_writer.add_head_count_kv(n_head)
  1241. self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
  1242. self.gguf_writer.add_file_type(self.ftype)
  1243. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  1244. del bid # unused
  1245. n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads"))
  1246. n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed"))
  1247. name = re.sub(r'transformer\.', '', name)
  1248. tensors: list[tuple[str, Tensor]] = []
  1249. if re.match(r"h\.\d+\.self_attention\.query_key_value\.weight", name):
  1250. # Map bloom-style qkv_linear to gpt-style qkv_linear
  1251. # bloom: https://github.com/huggingface/transformers/blob/main/src/transformers/models/bloom/modeling_bloom.py#L238-L252 # noqa
  1252. # gpt-2: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt2/modeling_gpt2.py#L312 # noqa
  1253. qkv_weights = data_torch.reshape((n_head, 3, n_embed // n_head, n_embed))
  1254. data_torch = torch.cat(
  1255. (
  1256. qkv_weights[:, 0, :, :].reshape((-1, n_embed)),
  1257. qkv_weights[:, 1, :, :].reshape((-1, n_embed)),
  1258. qkv_weights[:, 2, :, :].reshape((-1, n_embed)),
  1259. ),
  1260. dim=0,
  1261. )
  1262. logger.info("re-format attention.linear_qkv.weight")
  1263. elif re.match(r"h\.\d+\.self_attention\.query_key_value\.bias", name):
  1264. qkv_bias = data_torch.reshape((n_head, 3, n_embed // n_head))
  1265. data_torch = torch.cat(
  1266. (
  1267. qkv_bias[:, 0, :].reshape((n_embed,)),
  1268. qkv_bias[:, 1, :].reshape((n_embed,)),
  1269. qkv_bias[:, 2, :].reshape((n_embed,)),
  1270. ),
  1271. dim=0,
  1272. )
  1273. logger.info("re-format attention.linear_qkv.bias")
  1274. tensors.append((self.map_tensor_name(name), data_torch))
  1275. return tensors
  1276. @ModelBase.register("MPTForCausalLM")
  1277. class MPTModel(TextModel):
  1278. model_arch = gguf.MODEL_ARCH.MPT
  1279. def set_vocab(self):
  1280. try:
  1281. self._set_vocab_gpt2()
  1282. except Exception:
  1283. # Fallback for SEA-LION model
  1284. self._set_vocab_sentencepiece()
  1285. self.gguf_writer.add_add_bos_token(False)
  1286. self.gguf_writer.add_pad_token_id(3)
  1287. self.gguf_writer.add_eos_token_id(1)
  1288. self.gguf_writer.add_unk_token_id(0)
  1289. def set_gguf_parameters(self):
  1290. block_count = self.hparams["n_layers"]
  1291. self.gguf_writer.add_context_length(self.hparams["max_seq_len"])
  1292. self.gguf_writer.add_embedding_length(self.hparams["d_model"])
  1293. self.gguf_writer.add_block_count(block_count)
  1294. self.gguf_writer.add_feed_forward_length(4 * self.hparams["d_model"])
  1295. self.gguf_writer.add_head_count(self.hparams["n_heads"])
  1296. if kv_n_heads := self.hparams["attn_config"].get("kv_n_heads"):
  1297. self.gguf_writer.add_head_count_kv(kv_n_heads)
  1298. self.gguf_writer.add_layer_norm_eps(1e-5)
  1299. if self.hparams["attn_config"]["clip_qkv"] is not None:
  1300. self.gguf_writer.add_clamp_kqv(self.hparams["attn_config"]["clip_qkv"])
  1301. if self.hparams["attn_config"]["alibi"]:
  1302. self.gguf_writer.add_max_alibi_bias(self.hparams["attn_config"]["alibi_bias_max"])
  1303. else:
  1304. self.gguf_writer.add_max_alibi_bias(0.0)
  1305. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  1306. del bid # unused
  1307. if "scales" in name:
  1308. new_name = self.map_tensor_name(name, try_suffixes=(".weight", ".bias", ".scales"))
  1309. new_name = new_name.replace("scales", "act.scales")
  1310. else:
  1311. new_name = self.map_tensor_name(name, try_suffixes=(".weight", ".bias"))
  1312. return [(new_name, data_torch)]
  1313. @ModelBase.register("OrionForCausalLM")
  1314. class OrionModel(TextModel):
  1315. model_arch = gguf.MODEL_ARCH.ORION
  1316. def set_vocab(self):
  1317. self._set_vocab_sentencepiece()
  1318. def set_gguf_parameters(self):
  1319. block_count = self.hparams["num_hidden_layers"]
  1320. head_count = self.hparams["num_attention_heads"]
  1321. head_count_kv = self.hparams.get("num_key_value_heads", head_count)
  1322. ctx_length = 0
  1323. if "max_sequence_length" in self.hparams:
  1324. ctx_length = self.hparams["max_sequence_length"]
  1325. elif "max_position_embeddings" in self.hparams:
  1326. ctx_length = self.hparams["max_position_embeddings"]
  1327. elif "model_max_length" in self.hparams:
  1328. ctx_length = self.hparams["model_max_length"]
  1329. else:
  1330. raise ValueError("gguf: can not find ctx length parameter.")
  1331. self.gguf_writer.add_file_type(self.ftype)
  1332. self.gguf_writer.add_tensor_data_layout("Meta AI original pth")
  1333. self.gguf_writer.add_context_length(ctx_length)
  1334. self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
  1335. self.gguf_writer.add_block_count(block_count)
  1336. self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
  1337. self.gguf_writer.add_head_count(head_count)
  1338. self.gguf_writer.add_head_count_kv(head_count_kv)
  1339. # note: config provides rms norm but it is actually layer norm
  1340. # ref: https://huggingface.co/OrionStarAI/Orion-14B-Chat/blob/276a17221ce42beb45f66fac657a41540e71f4f5/modeling_orion.py#L570-L571
  1341. self.gguf_writer.add_layer_norm_eps(self.hparams["rms_norm_eps"])
  1342. @ModelBase.register("BaichuanForCausalLM", "BaiChuanForCausalLM")
  1343. class BaichuanModel(TextModel):
  1344. model_arch = gguf.MODEL_ARCH.BAICHUAN
  1345. def set_vocab(self):
  1346. self._set_vocab_sentencepiece()
  1347. def set_gguf_parameters(self):
  1348. block_count = self.hparams["num_hidden_layers"]
  1349. head_count = self.hparams["num_attention_heads"]
  1350. head_count_kv = self.hparams.get("num_key_value_heads", head_count)
  1351. ctx_length = 0
  1352. if "max_sequence_length" in self.hparams:
  1353. ctx_length = self.hparams["max_sequence_length"]
  1354. elif "max_position_embeddings" in self.hparams:
  1355. ctx_length = self.hparams["max_position_embeddings"]
  1356. elif "model_max_length" in self.hparams:
  1357. ctx_length = self.hparams["model_max_length"]
  1358. else:
  1359. raise ValueError("gguf: can not find ctx length parameter.")
  1360. self.gguf_writer.add_tensor_data_layout("Meta AI original pth")
  1361. self.gguf_writer.add_context_length(ctx_length)
  1362. self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
  1363. self.gguf_writer.add_block_count(block_count)
  1364. self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
  1365. self.gguf_writer.add_rope_dimension_count(self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
  1366. self.gguf_writer.add_head_count(head_count)
  1367. self.gguf_writer.add_head_count_kv(head_count_kv)
  1368. self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
  1369. self.gguf_writer.add_file_type(self.ftype)
  1370. rope_scaling = self.hparams.get("rope_scaling") or {}
  1371. if rope_scaling.get("rope_type", rope_scaling.get("type")) == "linear" and "factor" in rope_scaling:
  1372. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
  1373. self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
  1374. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  1375. head_count = self.hparams["num_attention_heads"]
  1376. head_count_kv = self.hparams.get("num_key_value_heads", head_count)
  1377. tensors: list[tuple[str, Tensor]] = []
  1378. if bid is not None and name == f"model.layers.{bid}.self_attn.W_pack.weight":
  1379. logger.info(f"Unpacking and permuting layer {bid}")
  1380. tensors = [
  1381. (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid),
  1382. self._reverse_hf_permute_part(data_torch, 0, head_count, head_count)),
  1383. (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid),
  1384. self._reverse_hf_permute_part(data_torch, 1, head_count, head_count_kv)),
  1385. (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid),
  1386. self._reverse_hf_part(data_torch, 2)),
  1387. ]
  1388. else:
  1389. tensors = [(self.map_tensor_name(name), data_torch)]
  1390. return tensors
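
    # The helpers below undo the Q/K row permutation that the HF export applies
    # for its rotate_half RoPE convention, restoring the original interleaved
    # layout; W_pack stores Q, K and V as three equal row blocks, hence the
    # `// 3` slicing. V is only sliced out, never permuted.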
    def _reverse_hf_permute(self, weights: Tensor, n_head: int, n_kv_head: int | None = None) -> Tensor:
        if n_kv_head is not None and n_head != n_kv_head:
            n_head //= n_kv_head

        return (
            weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
            .swapaxes(1, 2)
            .reshape(weights.shape)
        )

    def _reverse_hf_permute_part(
        self, weights: Tensor, n_part: int, n_head: int, n_head_kv: int | None = None,
    ) -> Tensor:
        r = weights.shape[0] // 3
        return self._reverse_hf_permute(weights[r * n_part:r * n_part + r, ...], n_head, n_head_kv)

    def _reverse_hf_part(self, weights: Tensor, n_part: int) -> Tensor:
        r = weights.shape[0] // 3
        return weights[r * n_part:r * n_part + r, ...]
  1407. @ModelBase.register("XverseForCausalLM")
  1408. class XverseModel(TextModel):
  1409. model_arch = gguf.MODEL_ARCH.XVERSE
  1410. def set_vocab(self):
  1411. assert (self.dir_model / "tokenizer.json").is_file()
  1412. dir_model = self.dir_model
  1413. hparams = self.hparams
  1414. tokens: list[bytes] = []
  1415. toktypes: list[int] = []
  1416. from transformers import AutoTokenizer
  1417. tokenizer = AutoTokenizer.from_pretrained(dir_model)
  1418. vocab_size = hparams.get("vocab_size", len(tokenizer.vocab))
  1419. # Since we are checking the maximum index, we need to ensure it's strictly less than vocab_size,
  1420. # because vocab_size is the count of items, and indexes start at 0.
  1421. max_vocab_index = max(tokenizer.get_vocab().values())
  1422. if max_vocab_index >= vocab_size:
  1423. raise ValueError("Vocabulary size exceeds expected maximum size.")
  1424. reverse_vocab: dict[int, str] = {id_: encoded_tok for encoded_tok, id_ in tokenizer.vocab.items()}
  1425. added_vocab = tokenizer.get_added_vocab()
  1426. for token_id in range(vocab_size):
  1427. token_text = reverse_vocab[token_id].encode('utf-8')
  1428. # replace "\x00" to string with length > 0
  1429. if token_text == b"\x00":
  1430. toktype = gguf.TokenType.BYTE # special
  1431. token_text = f"<{token_text}>".encode('utf-8')
  1432. elif re.fullmatch(br"<0x[0-9A-Fa-f]{2}>", token_text):
  1433. toktype = gguf.TokenType.BYTE # special
  1434. elif reverse_vocab[token_id] in added_vocab:
  1435. if tokenizer.added_tokens_decoder[token_id].special:
  1436. toktype = gguf.TokenType.CONTROL
  1437. else:
  1438. toktype = gguf.TokenType.USER_DEFINED
  1439. else:
  1440. toktype = gguf.TokenType.NORMAL
  1441. tokens.append(token_text)
  1442. toktypes.append(toktype)
  1443. self.gguf_writer.add_tokenizer_model("llama")
  1444. self.gguf_writer.add_tokenizer_pre("default")
  1445. self.gguf_writer.add_token_list(tokens)
  1446. self.gguf_writer.add_token_types(toktypes)
  1447. special_vocab = gguf.SpecialVocab(dir_model, n_vocab=len(tokens))
  1448. special_vocab.add_to_gguf(self.gguf_writer)
  1449. def set_gguf_parameters(self):
  1450. block_count = self.hparams["num_hidden_layers"]
  1451. head_count = self.hparams["num_attention_heads"]
  1452. head_count_kv = self.hparams.get("num_key_value_heads", head_count)
  1453. ctx_length = 0
  1454. if "max_sequence_length" in self.hparams:
  1455. ctx_length = self.hparams["max_sequence_length"]
  1456. elif "max_position_embeddings" in self.hparams:
  1457. ctx_length = self.hparams["max_position_embeddings"]
  1458. elif "model_max_length" in self.hparams:
  1459. ctx_length = self.hparams["model_max_length"]
  1460. else:
  1461. raise ValueError("gguf: can not find ctx length parameter.")
  1462. self.gguf_writer.add_tensor_data_layout("Meta AI original pth")
  1463. self.gguf_writer.add_context_length(ctx_length)
  1464. self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
  1465. self.gguf_writer.add_block_count(block_count)
  1466. self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
  1467. self.gguf_writer.add_rope_dimension_count(self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
  1468. self.gguf_writer.add_head_count(head_count)
  1469. self.gguf_writer.add_head_count_kv(head_count_kv)
  1470. self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
  1471. self.gguf_writer.add_file_type(self.ftype)
  1472. rope_scaling = self.hparams.get("rope_scaling") or {}
  1473. if rope_scaling.get("rope_type", rope_scaling.get("type")) == "linear" and "factor" in rope_scaling:
  1474. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
  1475. self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
  1476. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  1477. del bid # unused
  1478. head_count = self.hparams["num_attention_heads"]
  1479. head_count_kv = self.hparams.get("num_key_value_heads", head_count)
  1480. # HF models permute some of the tensors, so we need to undo that
  1481. if name.endswith("q_proj.weight"):
  1482. data_torch = self._reverse_hf_permute(data_torch, head_count, head_count)
  1483. if name.endswith("k_proj.weight"):
  1484. data_torch = self._reverse_hf_permute(data_torch, head_count, head_count_kv)
  1485. return [(self.map_tensor_name(name), data_torch)]
  1486. def _reverse_hf_permute(self, weights: Tensor, n_head: int, n_kv_head: int | None = None) -> Tensor:
  1487. if n_kv_head is not None and n_head != n_kv_head:
  1488. n_head //= n_kv_head
  1489. return (
  1490. weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
  1491. .swapaxes(1, 2)
  1492. .reshape(weights.shape)
  1493. )


@ModelBase.register("FalconForCausalLM", "RWForCausalLM")
class FalconModel(TextModel):
    model_arch = gguf.MODEL_ARCH.FALCON

    def set_gguf_parameters(self):
        block_count = self.hparams.get("num_hidden_layers")
        if block_count is None:
            block_count = self.hparams["n_layer"]  # old name

        n_head = self.hparams.get("num_attention_heads")
        if n_head is None:
            n_head = self.hparams["n_head"]  # old name

        n_head_kv = self.hparams.get("num_kv_heads")
        if n_head_kv is None:
            n_head_kv = self.hparams.get("n_head_kv", 1)  # old name

        self.gguf_writer.add_context_length(2048)  # not in config.json
        self.gguf_writer.add_tensor_data_layout("jploski")  # qkv tensor transform
        self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
        self.gguf_writer.add_feed_forward_length(4 * self.hparams["hidden_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_head_count(n_head)
        self.gguf_writer.add_head_count_kv(n_head_kv)
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        # QKV tensor transform
        # The original query_key_value tensor contains n_head_kv "kv groups",
        # each consisting of n_head / n_head_kv query weights followed by one key
        # and one value weight (shared by all query heads in the kv group).
        # This layout makes it a big pain to work with in GGML.
        # So we rearrange them here, so that we have n_head query weights
        # followed by n_head_kv key weights followed by n_head_kv value weights,
        # in contiguous fashion.
        # ref: https://github.com/jploski/ggml/blob/falcon40b/examples/falcon/convert-hf-to-ggml.py

        if "query_key_value" in name:
            n_head = self.find_hparam(["num_attention_heads", "n_head"])
            n_head_kv = self.find_hparam(["num_kv_heads", "n_head_kv"], optional=True) or 1
            head_dim = self.hparams["hidden_size"] // n_head

            qkv = data_torch.view(n_head_kv, n_head // n_head_kv + 2, head_dim, head_dim * n_head)

            q = qkv[:, :-2].reshape(n_head * head_dim, head_dim * n_head)
            k = qkv[:, [-2]].reshape(n_head_kv * head_dim, head_dim * n_head)
            v = qkv[:, [-1]].reshape(n_head_kv * head_dim, head_dim * n_head)

            data_torch = torch.cat((q, k, v)).reshape_as(data_torch)

        return [(self.map_tensor_name(name), data_torch)]
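

# Minimal sketch (not part of the converter) of the Falcon fused-QKV rearrangement
# above, on a toy configuration; all sizes here are made up for illustration.
def _demo_falcon_qkv_split():
    import torch

    n_head, n_head_kv, head_dim = 4, 2, 3
    hidden = n_head * head_dim
    # fused layout: n_head_kv groups of (n_head // n_head_kv queries + 1 key + 1 value)
    fused = torch.randn((n_head + 2 * n_head_kv) * head_dim, hidden)

    qkv = fused.view(n_head_kv, n_head // n_head_kv + 2, head_dim, hidden)
    q = qkv[:, :-2].reshape(n_head * head_dim, hidden)      # (12, 12)
    k = qkv[:, [-2]].reshape(n_head_kv * head_dim, hidden)  # (6, 12)
    v = qkv[:, [-1]].reshape(n_head_kv * head_dim, hidden)  # (6, 12)
    assert torch.cat((q, k, v)).shape == fused.shape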


@ModelBase.register("GPTBigCodeForCausalLM")
class StarCoderModel(TextModel):
    model_arch = gguf.MODEL_ARCH.STARCODER

    def set_gguf_parameters(self):
        block_count = self.hparams["n_layer"]

        self.gguf_writer.add_context_length(self.hparams["n_positions"])
        self.gguf_writer.add_embedding_length(self.hparams["n_embd"])
        self.gguf_writer.add_feed_forward_length(4 * self.hparams["n_embd"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_head_count(self.hparams["n_head"])
        self.gguf_writer.add_head_count_kv(1)
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)


@ModelBase.register("GPTRefactForCausalLM")
class RefactModel(TextModel):
    model_arch = gguf.MODEL_ARCH.REFACT

    def set_vocab(self):
        super().set_vocab()

        # TODO: how to determine special FIM tokens automatically?
        special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=False,
                                          special_token_types = ['prefix', 'suffix', 'middle', 'eot'])
        special_vocab._set_special_token("prefix", 1)
        special_vocab._set_special_token("suffix", 3)
        special_vocab._set_special_token("middle", 2)
        special_vocab.chat_template = None  # do not add it twice
        special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        hidden_dim = self.hparams["n_embd"]
        inner_dim = 4 * hidden_dim
        hidden_dim = int(2 * inner_dim / 3)
        multiple_of = 256
        ff_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of)

        block_count = self.hparams["n_layer"]

        # Refact uses ALiBi, so this context length comes from config.json and
        # presumably reflects the value used for training.
        self.gguf_writer.add_context_length(self.hparams["n_positions"])
        self.gguf_writer.add_embedding_length(self.hparams["n_embd"])
        self.gguf_writer.add_feed_forward_length(ff_dim)
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_head_count(self.hparams["n_head"])
        self.gguf_writer.add_head_count_kv(1)
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)
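
    # A worked example (not part of the converter) of the FFN sizing above,
    # with made-up numbers: n_embd = 4096 gives
    #   inner_dim  = 4 * 4096 = 16384
    #   hidden_dim = int(2 * 16384 / 3) = 10922
    #   ff_dim     = 256 * ((10922 + 255) // 256) = 256 * 43 = 11008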

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        hidden_dim = self.hparams["n_embd"]
        inner_dim = 4 * hidden_dim
        hidden_dim = int(2 * inner_dim / 3)
        multiple_of = 256
        ff_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of)
        n_head = self.hparams["n_head"]
        n_head_kv = 1
        head_dim = self.hparams["n_embd"] // n_head

        tensors: list[tuple[str, Tensor]] = []

        if bid is not None:
            if name == f"transformer.h.{bid}.attn.kv.weight":
                tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid), data_torch[:n_head_kv * head_dim]))
                tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid), data_torch[n_head_kv * head_dim:]))
            elif name == f"transformer.h.{bid}.attn.q.weight":
                tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid), data_torch))
            elif name == f"transformer.h.{bid}.mlp.gate_up_proj.weight":
                tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE, bid), data_torch[:ff_dim]))
                tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP, bid), data_torch[ff_dim:]))

        if len(tensors) == 0:
            tensors.append((self.map_tensor_name(name), data_torch))

        return tensors


@ModelBase.register("StableLmForCausalLM", "StableLMEpochForCausalLM", "LlavaStableLMEpochForCausalLM")
class StableLMModel(TextModel):
    model_arch = gguf.MODEL_ARCH.STABLELM

    def set_vocab(self):
        if (self.dir_model / "tokenizer.json").is_file():
            self._set_vocab_gpt2()
        else:
            # StableLM 2 1.6B used to have a vocab in a similar format to Qwen's vocab
            self._set_vocab_qwen()

    def set_gguf_parameters(self):
        hparams = self.hparams
        block_count = hparams["num_hidden_layers"]

        self.gguf_writer.add_context_length(hparams["max_position_embeddings"])
        self.gguf_writer.add_embedding_length(hparams["hidden_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
        rotary_factor = self.find_hparam(["partial_rotary_factor", "rope_pct"])
        self.gguf_writer.add_rope_dimension_count(int(rotary_factor * (hparams["hidden_size"] // hparams["num_attention_heads"])))
        self.gguf_writer.add_head_count(hparams["num_attention_heads"])
        self.gguf_writer.add_head_count_kv(hparams["num_key_value_heads"])
        self.gguf_writer.add_parallel_residual(hparams["use_parallel_residual"] if "use_parallel_residual" in hparams else True)
        self.gguf_writer.add_layer_norm_eps(self.find_hparam(["layer_norm_eps", "norm_eps"]))
        self.gguf_writer.add_file_type(self.ftype)

    _q_norms: list[dict[str, Tensor]] | None = None
    _k_norms: list[dict[str, Tensor]] | None = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        n_head = self.hparams["num_attention_heads"]
        n_kv_head = self.hparams["num_key_value_heads"]

        if name.find("q_layernorm.norms") != -1:
            assert bid is not None

            if self._q_norms is None:
                self._q_norms = [{} for _ in range(self.block_count)]

            self._q_norms[bid][name] = data_torch

            if len(self._q_norms[bid]) >= n_head:
                return self._stack_qk_norm(bid, n_head, self._q_norms[bid], "q_layernorm")
            else:
                return []

        if name.find("k_layernorm.norms") != -1:
            assert bid is not None

            if self._k_norms is None:
                self._k_norms = [{} for _ in range(self.block_count)]

            self._k_norms[bid][name] = data_torch

            if len(self._k_norms[bid]) >= n_kv_head:
                return self._stack_qk_norm(bid, n_kv_head, self._k_norms[bid], "k_layernorm")
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]

    def _stack_qk_norm(self, bid: int, n_head: int, norms: dict[str, Tensor], layer_name: str = "q_layernorm"):
        datas: list[Tensor] = []
        # extract the norms in order
        for xid in range(n_head):
            ename = f"model.layers.{bid}.self_attn.{layer_name}.norms.{xid}.weight"
            datas.append(norms[ename])
            del norms[ename]
        data_torch = torch.stack(datas, dim=0)

        merged_name = f"model.layers.{bid}.self_attn.{layer_name}.weight"
        new_name = self.map_tensor_name(merged_name)

        return [(new_name, data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._q_norms is not None or self._k_norms is not None:
            # flatten two `list[dict[str, Tensor]]` into a single `list[str]`
            norms = (
                [k for d in self._q_norms for k in d.keys()] if self._q_norms is not None else []
            ) + (
                [k for d in self._k_norms for k in d.keys()] if self._k_norms is not None else []
            )
            if len(norms) > 0:
                raise ValueError(f"Unprocessed norms: {norms}")


@ModelBase.register(
    "LLaMAForCausalLM",
    "LlamaForCausalLM",
    "MistralForCausalLM",
    "MixtralForCausalLM",
    "VLlama3ForCausalLM",
    "LlavaForConditionalGeneration",
    "VoxtralForConditionalGeneration",
    "LlamaModel")
class LlamaModel(TextModel):
    model_arch = gguf.MODEL_ARCH.LLAMA
    undo_permute = True

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # fix for SmolVLM2, missing `num_attention_heads` in config.json
        if self.hf_arch == "VLlama3ForCausalLM":
            self.hparams["num_attention_heads"] = self.hparams.get("num_attention_heads", 32)

    def _set_vocab_mistral(self):
        vocab = MistralVocab(self.dir_model)
        logger.info(
            f"Converting tokenizer {vocab.tokenizer_type} of size {vocab.vocab_size}."
        )

        self.gguf_writer.add_tokenizer_model(vocab.gguf_tokenizer_model)

        tokens = []
        scores = []
        toktypes = []

        for text, score, toktype in vocab.all_tokens():
            tokens.append(text)
            scores.append(score)
            toktypes.append(toktype)

        assert len(tokens) == vocab.vocab_size, (
            f"token count ({len(tokens)}) != vocab size ({vocab.vocab_size})"
        )

        if vocab.tokenizer_type == MistralTokenizerType.tekken:
            self.gguf_writer.add_tokenizer_pre("tekken")
            self.gguf_writer.add_token_merges(
                vocab.extract_vocab_merges_from_model()
            )

        logger.info(
            f"Setting bos, eos, unk and pad token IDs to {vocab.bos_id}, {vocab.eos_id}, {vocab.unk_id}, {vocab.pad_id}."
        )

        self.gguf_writer.add_bos_token_id(vocab.bos_id)
        self.gguf_writer.add_eos_token_id(vocab.eos_id)
        self.gguf_writer.add_unk_token_id(vocab.unk_id)
        self.gguf_writer.add_pad_token_id(vocab.pad_id)

        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)
        self.gguf_writer.add_vocab_size(vocab.vocab_size)

        self.gguf_writer.add_add_bos_token(True)
        self.gguf_writer.add_add_eos_token(False)

        template_dir = Path(__file__).parent / "models/templates/"

        if not self.is_mistral_format or not self.disable_mistral_community_chat_template:
            # For the Mistral format only, log that official tokenization and detokenization go through `mistral-common`.
            if self.is_mistral_format:
                logger.info(
                    "Using a Mistral community chat template. These templates can be subject to errors "
                    "in the early days or weeks after a release. Mistral recommends using `mistral-common` "
                    "to perform tokenization and detokenization."
                )
            template = MistralModel.get_community_chat_template(vocab, template_dir, self.is_mistral_format)
            self.gguf_writer.add_chat_template(template)
        else:
            logger.info("Not using a Mistral community chat template. Be sure to perform tokenization and detokenization via `mistral-common`.")

    def set_vocab(self):
        if self.is_mistral_format:
            return self._set_vocab_mistral()

        path_tekken_json = self.dir_model / "tekken.json"
        path_tokenizer_json = self.dir_model / "tokenizer.json"
        if path_tekken_json.is_file() and not path_tokenizer_json.is_file():
            return self._set_vocab_mistral()

        try:
            self._set_vocab_sentencepiece()
        except FileNotFoundError:
            try:
                self._set_vocab_llama_hf()
            except (FileNotFoundError, TypeError):
                # Llama 3
                self._set_vocab_gpt2()

        # Apply to CodeLlama only (and ignore for Llama 3 with a vocab size of 128256)
        if self.hparams.get("vocab_size", 32000) == 32016:
            special_vocab = gguf.SpecialVocab(
                self.dir_model, load_merges=False,
                special_token_types = ['prefix', 'suffix', 'middle', 'eot']
            )
            special_vocab._set_special_token("prefix", 32007)
            special_vocab._set_special_token("suffix", 32008)
            special_vocab._set_special_token("middle", 32009)
            special_vocab._set_special_token("eot", 32010)
            special_vocab.add_to_gguf(self.gguf_writer)

        tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
        if tokenizer_config_file.is_file():
            with open(tokenizer_config_file, "r", encoding="utf-8") as f:
                tokenizer_config_json = json.load(f)
                if "add_prefix_space" in tokenizer_config_json:
                    self.gguf_writer.add_add_space_prefix(tokenizer_config_json["add_prefix_space"])

        # Apply to granite small models only
        if self.hparams.get("vocab_size", 32000) == 49152:
            self.gguf_writer.add_add_bos_token(False)

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        hparams = self.hparams

        if not self.is_mistral_format:
            self.gguf_writer.add_vocab_size(hparams["vocab_size"])

        if (rope_dim := hparams.get("head_dim")) is None:
            rope_dim = hparams["hidden_size"] // hparams["num_attention_heads"]
        self.gguf_writer.add_rope_dimension_count(rope_dim)

        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "linear" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])

    @staticmethod
    def permute(weights: Tensor, n_head: int, n_head_kv: int | None):
        if n_head_kv is not None and n_head != n_head_kv:
            n_head = n_head_kv
        return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
                .swapaxes(1, 2)
                .reshape(weights.shape))

    _experts: list[dict[str, Tensor]] | None = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        n_head = self.find_hparam(["n_heads", "num_attention_heads"])
        n_kv_head = self.find_hparam(["n_kv_heads", "num_key_value_heads"])
        vision_prefixes = [
            "vision_encoder.",
            "vision_language_adapter.",
            "patch_merger.",
            "pre_mm_projector_norm",
        ]
        is_multimodal_tensor = "vision_tower" in name \
            or "vision_model" in name \
            or "audio_tower" in name \
            or "model.connector" in name \
            or "multi_modal_projector" in name \
            or any(
                name.startswith(prefix)
                for prefix in vision_prefixes
            )

        if is_multimodal_tensor:
            return []  # skip vision tensors
        elif self.hf_arch == "LlamaModel":
            name = "model." + name
        elif name.startswith("model.text_model"):
            name = name.replace("text_model.", "")  # for SmolVLM
        elif name.startswith("language_model."):
            name = name.replace("language_model.", "")  # for the rest

        if self.undo_permute:
            if name.endswith(("q_proj.weight", "q_proj.bias")):
                data_torch = LlamaModel.permute(data_torch, n_head, n_head)
            if name.endswith(("k_proj.weight", "k_proj.bias")):
                data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)

        # process the experts separately
        if name.find("block_sparse_moe.experts") != -1:
            n_experts = self.hparams["num_local_experts"]
            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for wid in ["w1", "w2", "w3"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.{wid}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)

                    merged_name = f"layers.{bid}.feed_forward.experts.{wid}.weight"

                    new_name = self.map_tensor_name(merged_name)

                    tensors.append((new_name, data_torch))
                return tensors
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]
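
    # A minimal sketch (not part of the converter) of the expert merge above:
    # per-expert 2D weights are collected into self._experts and, once all
    # n_experts for a block are present, stacked into one 3D tensor, e.g.
    #   datas = [torch.randn(n_ff, n_embd) for _ in range(n_experts)]
    #   torch.stack(datas, dim=0).shape == (n_experts, n_ff, n_embd)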

    def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
        if rope_scaling := self.find_hparam(["rope_scaling"], optional=True):
            if rope_scaling.get("rope_type", '').lower() == "llama3":
                base = self.hparams.get("rope_theta", 10000.0)
                if (dim := self.hparams.get("head_dim")) is None:
                    dim = self.hparams["hidden_size"] // self.hparams["num_attention_heads"]
                freqs = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim))

                factor = rope_scaling.get("factor", 8.0)
                low_freq_factor = rope_scaling.get("low_freq_factor", 1.0)
                high_freq_factor = rope_scaling.get("high_freq_factor", 4.0)
                old_context_len = self.hparams.get("original_max_position_embeddings", 8192)

                low_freq_wavelen = old_context_len / low_freq_factor
                high_freq_wavelen = old_context_len / high_freq_factor
                # assert low_freq_wavelen != high_freq_wavelen # Errors for Llama4

                rope_factors = []
                for freq in freqs:
                    wavelen = 2 * math.pi / freq
                    if wavelen < high_freq_wavelen:
                        rope_factors.append(1)
                    elif wavelen > low_freq_wavelen:
                        rope_factors.append(factor)
                    else:
                        smooth = (old_context_len / wavelen - low_freq_factor) / (high_freq_factor - low_freq_factor)
                        rope_factors.append(1 / ((1 - smooth) / factor + smooth))

                yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FREQS), torch.tensor(rope_factors, dtype=torch.float32))

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")
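

# Illustrative sketch (not part of the converter) of the Llama-3 rope-factor rule in
# LlamaModel.generate_extra_tensors above: short wavelengths keep factor 1, long
# wavelengths are scaled by `factor`, and the band in between is smoothly blended.
# The defaults mirror the fallbacks used above; the function itself is hypothetical.
def _demo_llama3_rope_factor(wavelen: float, factor: float = 8.0, low_freq_factor: float = 1.0,
                             high_freq_factor: float = 4.0, old_context_len: int = 8192) -> float:
    low_freq_wavelen = old_context_len / low_freq_factor    # 8192.0 with the defaults
    high_freq_wavelen = old_context_len / high_freq_factor  # 2048.0 with the defaults
    if wavelen < high_freq_wavelen:
        return 1.0
    if wavelen > low_freq_wavelen:
        return float(factor)
    smooth = (old_context_len / wavelen - low_freq_factor) / (high_freq_factor - low_freq_factor)
    return 1.0 / ((1.0 - smooth) / factor + smooth)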


@ModelBase.register("ArceeForCausalLM")
class ArceeModel(LlamaModel):
    model_arch = gguf.MODEL_ARCH.ARCEE

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self._try_set_pooling_type()
        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
            self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"])


@ModelBase.register(
    "LlavaForConditionalGeneration",  # pixtral
    "Mistral3ForConditionalGeneration",  # mistral small 3.1
)
class LlavaVisionModel(MmprojModel):
    img_break_tok_id = -1

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.hparams.get("model_type") == "pixtral":
            # layer_norm_eps is not in config.json, it is hard-coded in modeling_pixtral.py
            self.hparams["layer_norm_eps"] = self.hparams.get("layer_norm_eps", 1e-5)
            self.img_break_tok_id = self.get_token_id("[IMG_BREAK]")
        elif self.is_mistral_format:
            # hparams is already the vision config here, so norm_eps is only defined in global_config.
            self.hparams["norm_eps"] = self.global_config.get("norm_eps", None)
            assert self.hparams["norm_eps"] is not None, "norm_eps not found in params.json"
            self.img_break_tok_id = self.find_vparam(["image_break_token_id"])
        else:
            raise ValueError(f"Unsupported model type: {self.hparams['model_type']}")
        logger.info(f"Image break token id: {self.img_break_tok_id}")

    def get_token_id(self, token: str) -> int:
        tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
        with open(tokenizer_config_file, "r", encoding="utf-8") as f:
            added_tokens_decoder = json.load(f)['added_tokens_decoder']
            for id_, token_data in added_tokens_decoder.items():
                if token_data["content"] == token:
                    return int(id_)
        raise ValueError(f"Token '{token}' not found in tokenizer config.")

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        hparams = self.hparams
        if hparams.get("model_type") == "pixtral":
            self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.PIXTRAL)
            self.gguf_writer.add_vision_attention_layernorm_eps(hparams["layer_norm_eps"])

            # hidden_act
            if hparams["hidden_act"] == "silu":
                self.gguf_writer.add_vision_use_silu(True)
            elif hparams["hidden_act"] == "gelu":
                self.gguf_writer.add_vision_use_gelu(True)
            else:
                raise ValueError(f"Unsupported hidden_act: {hparams['hidden_act']}")

        # spatial_merge_size
        if "spatial_merge_size" in self.global_config:
            self.gguf_writer.add_vision_spatial_merge_size(self.global_config["spatial_merge_size"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused
        n_head = (
            self.hparams["num_attention_heads"] if not self.is_mistral_format else self.find_vparam(["num_attention_heads"])
        )
        n_kv_head = n_head

        valid_prefixes = (
            "multi_modal_projector.",
            "vision_tower.",
            "vision_encoder.",
            "vision_language_adapter.",
            "patch_merger.",
            "pre_mm_projector_norm",
        )

        if any(name.startswith(prefix) for prefix in valid_prefixes):
            # process vision tensors
            if name.endswith(("q_proj.weight", "q_proj.bias")) and not self.is_mistral_format:
                data_torch = LlamaModel.permute(data_torch, n_head, n_head)
            if name.endswith(("k_proj.weight", "k_proj.bias")) and not self.is_mistral_format:
                data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)
            return [(self.map_tensor_name(name), data_torch)]

        embed_key = "embed_tokens.weight" if not self.is_mistral_format else "tok_embeddings.weight"
        if self.img_break_tok_id > 0 and embed_key in name:
            logger.info(f"Extracting [IMG_BREAK] token embedding from {name}")
            # for pixtral model, we need to extract the [IMG_BREAK] token embedding
            img_break_embd = data_torch[self.img_break_tok_id]
            name = gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.V_TOK_EMBD_IMG_BREAK]
            return [(self.map_tensor_name(name), img_break_embd)]

        return []  # skip other tensors
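

# Minimal sketch (not part of the converter) of the [IMG_BREAK] extraction above:
# a single token embedding is just one row of the token-embedding matrix, so
# indexing with the token id yields a 1D vector. Sizes and the id are made up.
def _demo_extract_token_embedding():
    import torch

    vocab_size, n_embd, img_break_tok_id = 16, 4, 10  # hypothetical values
    embed_tokens = torch.randn(vocab_size, n_embd)
    img_break_embd = embed_tokens[img_break_tok_id]  # shape: (n_embd,)
    assert img_break_embd.shape == (n_embd,)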


@ModelBase.register("Idefics3ForConditionalGeneration", "SmolVLMForConditionalGeneration")
class SmolVLMModel(MmprojModel):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.hparams["model_type"] == "smolvlm_vision":
            # fix for SmolVLM2, missing some keys in config.json
            # default values are taken from transformers code
            self.hparams["hidden_size"] = self.hparams.get("hidden_size", 1152)
            self.hparams["num_attention_heads"] = self.hparams.get("num_attention_heads", 16)
            self.hparams["intermediate_size"] = self.hparams.get("intermediate_size", 3072)

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.IDEFICS3)
        self.gguf_writer.add_vision_attention_layernorm_eps(self.hparams.get("layer_norm_eps", 1e-5))
        self.gguf_writer.add_vision_projector_scale_factor(self.global_config.get("scale_factor", 2))
        self.gguf_writer.add_vision_use_gelu(True)

    def tensor_force_quant(self, name, new_name, bid, n_dims):
        if ".embeddings." in name:
            return gguf.GGMLQuantizationType.F32
        return super().tensor_force_quant(name, new_name, bid, n_dims)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused
        is_vision_tensor = "vision_tower" in name or "vision_model" in name or "model.connector" in name

        if is_vision_tensor:
            return [(self.map_tensor_name(name), data_torch)]

        return []  # skip other tensors


@ModelBase.register("Llama4ForConditionalGeneration")
class Llama4Model(LlamaModel):
    model_arch = gguf.MODEL_ARCH.LLAMA4
    undo_permute = False

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # IMPORTANT: the normal "intermediate_size" is renamed to "intermediate_size_mlp"; we need to undo this
        self.hparams["intermediate_size_moe"] = self.hparams["intermediate_size"]
        self.hparams["intermediate_size"] = self.hparams["intermediate_size_mlp"]

    def set_vocab(self):
        self._set_vocab_gpt2()

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_interleave_moe_layer_step(self.hparams["interleave_moe_layer_step"])
        self.gguf_writer.add_expert_feed_forward_length(self.hparams["intermediate_size_moe"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None):
        if name.startswith("language_model."):
            name = name.replace("language_model.", "")

        # split the gate_up into gate and up
        if "gate_up_proj" in name:
            name_up = name.replace("gate_up_proj", "up_proj.weight")
            name_gate = name.replace("gate_up_proj", "gate_proj.weight")
            dim_half = data_torch.shape[-1] // 2
            gate_proj_weight, up_proj_weight = data_torch.transpose(-1, -2).split(dim_half, dim=-2)
            return [
                (self.map_tensor_name(name_gate), gate_proj_weight),
                (self.map_tensor_name(name_up), up_proj_weight)
            ]

        if name.endswith("down_proj"):
            name += ".weight"
            data_torch = data_torch.transpose(-1, -2)

        if "multi_modal_projector" in name or "vision_model" in name:
            return []
        return super().modify_tensors(data_torch, name, bid)
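

# Minimal sketch (not part of the converter) of the fused gate_up split above:
# the fused projection is transposed, then cut in half along the row dimension,
# yielding separate gate and up projections. Sizes here are made up.
def _demo_split_gate_up():
    import torch

    n_embd, n_ff = 6, 8  # hypothetical sizes
    gate_up = torch.randn(n_embd, 2 * n_ff)  # fused (in, 2 * out) expert layout
    dim_half = gate_up.shape[-1] // 2
    gate, up = gate_up.transpose(-1, -2).split(dim_half, dim=-2)
    assert gate.shape == (n_ff, n_embd) and up.shape == (n_ff, n_embd)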


@ModelBase.register("Llama4ForConditionalGeneration")
class Llama4VisionModel(MmprojModel):
    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.LLAMA4)
        self.gguf_writer.add_vision_attention_layernorm_eps(self.hparams["norm_eps"])
        self.gguf_writer.add_vision_projector_scale_factor(int(1.0 / self.hparams["pixel_shuffle_ratio"]))
        assert self.hparams["hidden_act"] == "gelu"
        self.gguf_writer.add_vision_use_gelu(True)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused
        if "multi_modal_projector" in name or "vision_model" in name:
            # process vision tensors
            if "positional_embedding_vlm" in name and ".weight" not in name:
                name += ".weight"
            if "multi_modal_projector.linear_1" in name:
                # despite the numbered suffix in its name, this is a single fully connected layer
                return [(gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.V_MMPROJ_FC] + '.weight', data_torch)]
            return [(self.map_tensor_name(name), data_torch)]
        return []


@ModelBase.register("Mistral3ForConditionalGeneration")
class Mistral3Model(LlamaModel):
    model_arch = gguf.MODEL_ARCH.LLAMA

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None):
        name = name.replace("language_model.", "")
        if "multi_modal_projector" in name or "vision_tower" in name:
            return []
        return super().modify_tensors(data_torch, name, bid)


@ModelBase.register("DeciLMForCausalLM")
class DeciModel(TextModel):
    model_arch = gguf.MODEL_ARCH.DECI

    @staticmethod
    def _ffn_mult_to_intermediate_size(ffn_mult: float, n_embd: int) -> int:
        # DeciLM-specific code
        intermediate_size = int(2 * ffn_mult * n_embd / 3)
        return DeciModel._find_multiple(intermediate_size, 256)

    @staticmethod
    def _find_multiple(n: int, k: int) -> int:
        # DeciLM-specific code
        if n % k == 0:
            return n
        return n + k - (n % k)
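
    # A minimal worked example (not part of the converter) of the two helpers above,
    # with made-up numbers: ffn_mult = 1.3 and n_embd = 4096 give
    #   intermediate_size = int(2 * 1.3 * 4096 / 3) = 3549
    #   _find_multiple(3549, 256) = 3549 + 256 - (3549 % 256) = 3584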

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if "block_configs" in self.hparams:  # Llama-3_1-Nemotron-51B
            _block_configs: list[dict[str, Any]] = self.hparams["block_configs"]
            assert self.block_count == len(_block_configs)
            self._num_kv_heads = list()
            self._num_heads = list()
            _ffn_multipliers = list()
            # ***linear attention layer***
            # if n_heads_in_group is None and replace_with_linear is True
            # then _num_kv_heads[il] is 0 and _num_heads[il] is num_attention_heads
            # ***attention-free layer***
            # if n_heads_in_group is None and replace_with_linear is False
            # then _num_kv_heads[il] is 0 and _num_heads[il] is 0
            # ***normal attention-layer***
            # if n_heads_in_group is not None, then
            # _num_kv_heads[il] is num_attention_head // n_heads_in_group and
            # _num_heads[il] is num_attention_head
            # ***dummy layer*** for nemotron 253B
            # if n_heads_in_group is None and ffn_mult is None
            # then _num_kv_heads[il] is 0 and _num_heads[il] is 0 and _ffn_dims is 0
            for il in range(len(_block_configs)):
                if _block_configs[il]["attention"]["n_heads_in_group"] is None:
                    if _block_configs[il]["attention"]["replace_with_linear"] is True:
                        self._num_kv_heads.append(0)
                        self._num_heads.append(self.hparams["num_attention_heads"])
                    else:
                        self._num_kv_heads.append(0)
                        self._num_heads.append(0)
                else:
                    self._num_kv_heads.append(self.hparams["num_attention_heads"] // _block_configs[il]["attention"]["n_heads_in_group"])
                    self._num_heads.append(self.hparams["num_attention_heads"])
                if _block_configs[il]["ffn"]["ffn_mult"] is None:  # dummy layer
                    _ffn_multipliers.append(0.0)
                else:
                    _ffn_multipliers.append(_block_configs[il]["ffn"]["ffn_mult"])
            assert self.block_count == len(self._num_kv_heads)
            assert self.block_count == len(self._num_heads)
            assert self.block_count == len(_ffn_multipliers)
            assert isinstance(self._num_kv_heads, list) and isinstance(self._num_kv_heads[0], int)
            assert isinstance(self._num_heads, list) and isinstance(self._num_heads[0], int)
            assert isinstance(_ffn_multipliers, list) and isinstance(_ffn_multipliers[0], float)
            self._ffn_dims: list[int] = [
                DeciModel._ffn_mult_to_intermediate_size(multiplier, self.hparams["hidden_size"])
                for multiplier in _ffn_multipliers
            ]

    def set_vocab(self):
        # Please change tokenizer_config.json of Llama-3_1-Nemotron-51B's
        # eos_token from '|eot_id|' to '|end_of_text|'
        if self.hparams.get("vocab_size", 128256) == 128256:
            tokens, toktypes, tokpre = self.get_vocab_base()
            self.gguf_writer.add_tokenizer_model("gpt2")
            self.gguf_writer.add_tokenizer_pre(tokpre)
            self.gguf_writer.add_token_list(tokens)
            self.gguf_writer.add_token_types(toktypes)

            special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=True)
            special_vocab.add_to_gguf(self.gguf_writer)
        else:
            # DeciLM-7B
            self._set_vocab_llama_hf()

    def set_gguf_parameters(self):
        if "block_configs" in self.hparams:  # Llama-3_1-Nemotron-51B
            assert self.block_count == len(self._num_kv_heads)
            assert self.block_count == len(self._num_heads)
            assert self.block_count == len(self._ffn_dims)
            if (rope_theta := self.hparams.get("rope_theta")) is not None:
                self.gguf_writer.add_rope_freq_base(rope_theta)
            self.gguf_writer.add_head_count_kv(self._num_kv_heads)
            self.gguf_writer.add_head_count(self._num_heads)
            self.gguf_writer.add_feed_forward_length(self._ffn_dims)
            self.gguf_writer.add_block_count(self.block_count)
            self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"])
            self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
            self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
            self.gguf_writer.add_key_length(self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
            self.gguf_writer.add_value_length(self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
            self.gguf_writer.add_file_type(self.ftype)
        else:  # DeciLM-7B
            super().set_gguf_parameters()
            if "num_key_value_heads_per_layer" in self.hparams:  # DeciLM-7B
                self._num_kv_heads: list[int] = self.hparams["num_key_value_heads_per_layer"]
                assert self.block_count == len(self._num_kv_heads)
                self.gguf_writer.add_head_count_kv(self._num_kv_heads)
        hparams = self.hparams
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])

        if (rope_dim := hparams.get("head_dim")) is None:
            rope_dim = hparams["hidden_size"] // hparams["num_attention_heads"]
        self.gguf_writer.add_rope_dimension_count(rope_dim)

        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "linear" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])

    @staticmethod
    def permute(weights: Tensor, n_head: int, n_head_kv: int | None):
        if n_head_kv is not None and n_head != n_head_kv:
            n_head = n_head_kv
        return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
                .swapaxes(1, 2)
                .reshape(weights.shape))

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        n_head = self.hparams["num_attention_heads"]
        if bid is not None:
            if "num_key_value_heads_per_layer" in self.hparams:
                n_kv_head = self.hparams["num_key_value_heads_per_layer"][bid]
            elif "block_configs" in self.hparams:
                n_kv_head = self._num_kv_heads[bid]
                n_head = self._num_heads[bid]
            else:
                n_kv_head = self.hparams.get("num_key_value_heads")
        else:
            n_kv_head = self.hparams.get("num_key_value_heads")

        if name.endswith(("q_proj.weight", "q_proj.bias")):
            data_torch = DeciModel.permute(data_torch, n_head, n_head)
        if name.endswith(("k_proj.weight", "k_proj.bias")):
            data_torch = DeciModel.permute(data_torch, n_head, n_kv_head)

        return [(self.map_tensor_name(name), data_torch)]

    def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
        if rope_scaling := self.find_hparam(["rope_scaling"], optional=True):
            if rope_scaling.get("rope_type", '').lower() == "llama3":
                base = self.hparams.get("rope_theta", 10000.0)
                if (dim := self.hparams.get("head_dim")) is None:
                    dim = self.hparams["hidden_size"] // self.hparams["num_attention_heads"]
                freqs = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim))

                factor = rope_scaling.get("factor", 8.0)
                low_freq_factor = rope_scaling.get("low_freq_factor", 1.0)
                high_freq_factor = rope_scaling.get("high_freq_factor", 4.0)
                old_context_len = self.hparams.get("original_max_position_embeddings", 8192)

                low_freq_wavelen = old_context_len / low_freq_factor
                high_freq_wavelen = old_context_len / high_freq_factor
                assert low_freq_wavelen != high_freq_wavelen

                rope_factors = []
                for freq in freqs:
                    wavelen = 2 * math.pi / freq
                    if wavelen < high_freq_wavelen:
                        rope_factors.append(1)
                    elif wavelen > low_freq_wavelen:
                        rope_factors.append(factor)
                    else:
                        smooth = (old_context_len / wavelen - low_freq_factor) / (high_freq_factor - low_freq_factor)
                        rope_factors.append(1 / ((1 - smooth) / factor + smooth))

                yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FREQS), torch.tensor(rope_factors, dtype=torch.float32))

    def prepare_tensors(self):
        super().prepare_tensors()


@ModelBase.register("BitnetForCausalLM")
class BitnetModel(TextModel):
    model_arch = gguf.MODEL_ARCH.BITNET

    def set_vocab(self):
        self._set_vocab_sentencepiece()

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
        self.gguf_writer.add_rope_scaling_factor(1.0)

    def weight_quant(self, weight: Tensor) -> Tensor:
        dtype = weight.dtype
        weight = weight.float()
        scale = weight.abs().mean().clamp(min=1e-5)
        iscale = 1 / scale
        # TODO: multiply by the scale directly instead of inverting it twice
        # (this is also unnecessarily doubly inverted upstream)
        # ref: https://huggingface.co/1bitLLM/bitnet_b1_58-3B/blob/af89e318d78a70802061246bf037199d2fb97020/utils_quant.py#L10
        result = (weight * iscale).round().clamp(-1, 1) / iscale
        return result.type(dtype)
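
    # A minimal worked example (not part of the converter) of weight_quant() above:
    #   weight = [0.30, -0.04, -0.50] -> scale = mean(|w|) = 0.28
    #   weight / scale = [1.07, -0.14, -1.79] -> round + clamp -> [1, 0, -1]
    #   result = [0.28, 0.00, -0.28]  (ternary values times the shared scale)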

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        new_name = self.map_tensor_name(name)

        if any(self.match_model_tensor_name(new_name, key, bid) for key in [
            gguf.MODEL_TENSOR.ATTN_Q,
            gguf.MODEL_TENSOR.ATTN_K,
            gguf.MODEL_TENSOR.ATTN_V,
            gguf.MODEL_TENSOR.ATTN_OUT,
            gguf.MODEL_TENSOR.FFN_UP,
            gguf.MODEL_TENSOR.FFN_DOWN,
            gguf.MODEL_TENSOR.FFN_GATE,
        ]):
            # transform weight into 1/0/-1 (in fp32)
            data_torch = self.weight_quant(data_torch)

        yield (new_name, data_torch)


@ModelBase.register("GrokForCausalLM", "Grok1ForCausalLM")
class GrokModel(TextModel):
    model_arch = gguf.MODEL_ARCH.GROK

    def set_vocab(self):
        if (self.dir_model / 'tokenizer.model').is_file():
            self._set_vocab_sentencepiece()
            return

        if not (self.dir_model / 'tokenizer.json').is_file() or not (self.dir_model / 'chat_template.jinja').is_file():
            logger.error('Error: Missing vocab and chat template, download files from https://huggingface.co/alvarobartt/grok-2-tokenizer')
            sys.exit(1)

        self._set_vocab_gpt2()

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def set_gguf_parameters(self):
        super().set_gguf_parameters()

        self.gguf_writer.add_attn_logit_softcapping(self.hparams.get("attn_logit_softcapping", 30.0))
        self.gguf_writer.add_router_logit_softcapping(self.hparams.get("router_logit_softcapping", 30.0))
        if (final_logit_softcap := self.hparams.get("final_logit_softcapping")):
            self.gguf_writer.add_final_logit_softcapping(final_logit_softcap)

        if (rope_dim := self.hparams.get("head_dim")) is None:
            rope_dim = self.hparams["hidden_size"] // self.hparams["num_attention_heads"]

        if (moe_intermediate_size := self.hparams.get("moe_intermediate_size")) is not None:
            self.gguf_writer.add_expert_feed_forward_length(moe_intermediate_size)

        # Treat "original" as "yarn", seems to have been a mistake
        if self.hparams.get("rope_type") in ("yarn", "original"):
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
            self.gguf_writer.add_rope_scaling_factor(self.hparams["scaling_factor"])
            self.gguf_writer.add_rope_scaling_orig_ctx_len(self.hparams["original_max_position_embeddings"])
            self.gguf_writer.add_rope_scaling_yarn_ext_factor(self.hparams["extrapolation_factor"])
            self.gguf_writer.add_rope_scaling_yarn_attn_factor(self.hparams["attn_factor"])
            self.gguf_writer.add_rope_scaling_yarn_beta_fast(self.hparams["beta_fast"])
            self.gguf_writer.add_rope_scaling_yarn_beta_slow(self.hparams["beta_slow"])

        if temp_len := self.hparams.get("attn_temperature_len"):
            self.gguf_writer.add_attn_temperature_length(temp_len)

        self.gguf_writer.add_attn_output_scale(self.hparams.get("attn_output_multiplier", rope_dim**-0.5))
        self.gguf_writer.add_embedding_scale(self.hparams["embedding_multiplier_scale"])
        self.gguf_writer.add_logit_scale(self.hparams["output_multiplier_scale"])

    _experts: list[dict[str, list[Tensor]]] | None = None
    _cur_expert = ""

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        tensors: list[tuple[str, Tensor]] = []
        is_expert = ".moe." in name or ".block_sparse_moe.experts." in name

        if not is_expert:
            tensors.append((self.map_tensor_name(name), data_torch))

        # process the experts separately
        if is_expert or self._cur_expert:
            n_experts = self.hparams["num_local_experts"]
            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            # concatenate split tensors
            if name in self._experts[bid]:
                self._cur_expert = name
                self._experts[bid][name].append(data_torch)
                return []
            elif is_expert:
                self._cur_expert = name
                self._experts[bid][name] = [data_torch]
                return []
            else:
                self._cur_expert = ""

            for bid in range(self.block_count):
                if len(self._experts[bid]) >= n_experts * 3:
                    # merge the experts into a single 3d tensor
                    for wid in [("linear", "w1", 0), ("linear_1", "w2", 1), ("linear_v", "w3", 0)]:
                        datas: list[Tensor] = []

                        for xid in range(n_experts):
                            ename = f"transformer.decoder_layer.{bid}.moe.{xid}.{wid[0]}.weight"
                            if ename not in self._experts[bid]:
                                ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.{wid[1]}.weight"
                            tensor_list = self._experts[bid][ename]
                            datas.append(torch.cat(tensor_list, dim=wid[2]) if len(tensor_list) > 1 else tensor_list[0])
                            del self._experts[bid][ename]

                        data_torch = torch.stack(datas, dim=0)
                        merged_name = f"transformer.decoder_layer.{bid}.moe.{wid[0]}.weight"
                        new_name = self.map_tensor_name(merged_name)
                        yield (new_name, data_torch)

        yield from tensors


@ModelBase.register("DbrxForCausalLM")
class DbrxModel(TextModel):
    model_arch = gguf.MODEL_ARCH.DBRX

    def set_gguf_parameters(self):
        ffn_config = self.hparams["ffn_config"]
        attn_config = self.hparams["attn_config"]
        self.gguf_writer.add_block_count(self.hparams["n_layers"])

        self.gguf_writer.add_context_length(self.hparams["max_seq_len"])
        self.gguf_writer.add_embedding_length(self.hparams["d_model"])
        self.gguf_writer.add_feed_forward_length(ffn_config["ffn_hidden_size"])

        self.gguf_writer.add_head_count(self.hparams["n_heads"])
        self.gguf_writer.add_head_count_kv(attn_config["kv_n_heads"])

        self.gguf_writer.add_rope_freq_base(attn_config["rope_theta"])

        self.gguf_writer.add_clamp_kqv(attn_config["clip_qkv"])

        self.gguf_writer.add_expert_count(ffn_config["moe_num_experts"])
        self.gguf_writer.add_expert_used_count(ffn_config["moe_top_k"])

        self.gguf_writer.add_layer_norm_eps(1e-5)

        self.gguf_writer.add_file_type(self.ftype)
        logger.info(f"gguf: file type = {self.ftype}")

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        n_expert = self.hparams["ffn_config"]["moe_num_experts"]
        n_ff = self.hparams["ffn_config"]["ffn_hidden_size"]
        n_embd = self.hparams["d_model"]

        # Specific behavior for expert tensors: add a .weight suffix, view as 3D and transpose.
        # The original implementation expects (n_expert, n_ff, n_embd) for all expert weights,
        # but the llama.cpp MoE graph works differently, and ggml dimensions are typically
        # the reverse of the pytorch dimensions, so (n_expert, n_ff, n_embd) in pytorch
        # is {n_embd, n_ff, n_expert} in a ggml_tensor.
        exp_tensor_names = {"ffn.experts.mlp.w1": None,       # LLM_TENSOR_FFN_GATE_EXPS ggml_tensor->ne{n_embd, n_ff, n_expert}
                            "ffn.experts.mlp.w2": (0, 2, 1),  # LLM_TENSOR_FFN_DOWN_EXPS ggml_tensor->ne{n_ff, n_embd, n_expert}
                            "ffn.experts.mlp.v1": None}       # LLM_TENSOR_FFN_UP_EXPS   ggml_tensor->ne{n_embd, n_ff, n_expert}
        experts = False

        for exp_tensor_name in exp_tensor_names.keys():
            if name.find(exp_tensor_name) != -1 and name.find(".weight") == -1:
                experts = True
                data_torch = data_torch.view(n_expert, n_ff, n_embd)
                if (permute_tensor := exp_tensor_names[exp_tensor_name]) is not None:
                    data_torch = data_torch.permute(*permute_tensor)
                break

        # map tensor names
        # In MoE models the ffn tensors are typically most of the model weights,
        # and need to be quantizable. Quantize expects tensor names to end in .weight.
        # Every other model has weight names ending in .weight, so assume that
        # convention; dbrx does not follow it:
        # https://huggingface.co/databricks/dbrx-instruct/blob/main/model.safetensors.index.json#L15
        new_name = self.map_tensor_name(name if not experts else name + ".weight", try_suffixes=(".weight",))

        return [(new_name, data_torch)]

    def tensor_force_quant(self, name: str, new_name: str, bid: int | None, n_dims: int) -> gguf.GGMLQuantizationType | bool:
        del name, new_name, bid  # unused

        return n_dims > 1
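

# Minimal sketch (not part of the converter) of the dbrx expert reshaping above:
# the flat expert blob is viewed as (n_expert, n_ff, n_embd) and, for w2, permuted
# with (0, 2, 1) so the last two dims match what llama.cpp expects. Sizes are made up.
def _demo_dbrx_expert_view():
    import torch

    n_expert, n_ff, n_embd = 2, 8, 6  # hypothetical sizes
    w2_flat = torch.randn(n_expert * n_ff * n_embd)
    w2 = w2_flat.view(n_expert, n_ff, n_embd).permute(0, 2, 1)
    assert w2.shape == (n_expert, n_embd, n_ff)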


@ModelBase.register("MiniCPMForCausalLM")
class MiniCPMModel(TextModel):
    model_arch = gguf.MODEL_ARCH.MINICPM

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        embedding_scale = float(self.hparams["scale_emb"])
        self.gguf_writer.add_embedding_scale(embedding_scale)
        logger.info(f"gguf: (minicpm) embedding_scale = {embedding_scale}")
        residual_scale = self.hparams["scale_depth"] / self.hparams["num_hidden_layers"] ** 0.5
        self.gguf_writer.add_residual_scale(residual_scale)
        logger.info(f"gguf: (minicpm) residual_scale = {residual_scale}")
        logit_scale = self.hparams["hidden_size"] / self.hparams["dim_model_base"]
        self.gguf_writer.add_logit_scale(logit_scale)
        logger.info(f"gguf: (minicpm) logit_scale = {logit_scale}")
        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "longrope":
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LONGROPE)
            logger.info(f"gguf: (minicpm) rope_scaling_type = {gguf.RopeScalingType.LONGROPE}")

    def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
        rope_dims = self.hparams["hidden_size"] // self.hparams["num_attention_heads"]
        rope_scaling = self.find_hparam(['rope_scaling'], True)
        if rope_scaling is not None:
            long_factors = rope_scaling.get('long_factor', None)
            short_factors = rope_scaling.get('short_factor', None)

            if long_factors is None or short_factors is None:
                raise KeyError('Missing the required key rope_scaling.long_factor or rope_scaling.short_factor')

            if len(long_factors) != len(short_factors) or len(long_factors) != rope_dims / 2:
                raise ValueError(f'The length of rope long and short factors must be {rope_dims / 2}')

            yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FACTORS_LONG), torch.tensor(long_factors, dtype=torch.float32))
            yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FACTORS_SHORT), torch.tensor(short_factors, dtype=torch.float32))

    def set_vocab(self):
        self._set_vocab_sentencepiece()

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        n_head = self.hparams["num_attention_heads"]
        n_kv_head = self.hparams.get("num_key_value_heads")

        # HF models permute some of the tensors, so we need to undo that
        if name.endswith("q_proj.weight"):
            data_torch = LlamaModel.permute(data_torch, n_head, n_head)
        if name.endswith("k_proj.weight"):
            data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)

        return [(self.map_tensor_name(name), data_torch)]


@ModelBase.register("MiniCPM3ForCausalLM")
class MiniCPM3Model(TextModel):
    model_arch = gguf.MODEL_ARCH.MINICPM3

    def set_gguf_parameters(self):
        hparams = self.hparams

        self.gguf_writer.add_file_type(self.ftype)
        self.gguf_writer.add_context_length(hparams["max_position_embeddings"])
        self.gguf_writer.add_embedding_length(hparams["hidden_size"])
        self.gguf_writer.add_block_count(self.block_count)
        self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
        self.gguf_writer.add_head_count(hparams["num_attention_heads"])
        self.gguf_writer.add_head_count_kv(hparams["num_key_value_heads"])
        self.gguf_writer.add_layer_norm_rms_eps(hparams["rms_norm_eps"])
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])
        if "q_lora_rank" in hparams and hparams["q_lora_rank"] is not None:
            self.gguf_writer.add_q_lora_rank(hparams["q_lora_rank"])
        self.gguf_writer.add_kv_lora_rank(hparams["kv_lora_rank"])
        self.gguf_writer.add_key_length(hparams["qk_nope_head_dim"] + hparams["qk_rope_head_dim"])
        self.gguf_writer.add_rope_dimension_count(hparams["qk_rope_head_dim"])

    def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
        rope_scaling = self.find_hparam(['rope_scaling'], True)
        if rope_scaling is not None:
            rope_dims = self.hparams["qk_rope_head_dim"]

            long_factors = rope_scaling.get('long_factor', None)
            short_factors = rope_scaling.get('short_factor', None)

            if long_factors is None or short_factors is None:
                raise KeyError('Missing the required key rope_scaling.long_factor or rope_scaling.short_factor')

            if len(long_factors) != len(short_factors) or len(long_factors) != rope_dims / 2:
                raise ValueError(f'The length of rope long and short factors must be {rope_dims / 2}')

            yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FACTORS_LONG), torch.tensor(long_factors, dtype=torch.float32))
            yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FACTORS_SHORT), torch.tensor(short_factors, dtype=torch.float32))

    def set_vocab(self):
        self._set_vocab_sentencepiece()

    def _reverse_hf_permute(self, weights: Tensor, n_head: int, n_kv_head: int | None = None) -> Tensor:
        if n_kv_head is not None and n_head != n_kv_head:
            n_head //= n_kv_head

        return (
            weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
            .swapaxes(1, 2)
            .reshape(weights.shape)
        )

@ModelBase.register("QWenLMHeadModel")
class QwenModel(TextModel):
    model_arch = gguf.MODEL_ARCH.QWEN

    @staticmethod
    def token_bytes_to_string(b):
        from transformers.models.gpt2.tokenization_gpt2 import bytes_to_unicode
        byte_encoder = bytes_to_unicode()
        return ''.join([byte_encoder[ord(char)] for char in b.decode('latin-1')])

    @staticmethod
    def bpe(mergeable_ranks: dict[bytes, int], token: bytes, max_rank: int | None = None) -> list[bytes]:
        parts = [bytes([b]) for b in token]
        while True:
            min_idx = None
            min_rank = None
            for i, pair in enumerate(zip(parts[:-1], parts[1:])):
                rank = mergeable_ranks.get(pair[0] + pair[1])
                if rank is not None and (min_rank is None or rank < min_rank):
                    min_idx = i
                    min_rank = rank
            if min_rank is None or (max_rank is not None and min_rank >= max_rank):
                break
            assert min_idx is not None
            parts = parts[:min_idx] + [parts[min_idx] + parts[min_idx + 1]] + parts[min_idx + 2:]
        return parts
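
    # Editor's note: a tiny, self-contained illustration of the BPE helper
    # above (toy ranks, hypothetical): the lowest-rank merge is applied
    # repeatedly until no merge (or none below max_rank) applies.
    #   >>> ranks = {b"ab": 0, b"abc": 1}
    #   >>> QwenModel.bpe(ranks, b"abc")
    #   [b'abc']
    #   >>> QwenModel.bpe(ranks, b"abc", max_rank=1)
    #   [b'ab', b'c']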

    def set_vocab(self):
        self._set_vocab_qwen()

    def set_gguf_parameters(self):
        self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"])
        self.gguf_writer.add_block_count(self.hparams["num_hidden_layers"])
        self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
        self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
        self.gguf_writer.add_rope_freq_base(self.hparams["rotary_emb_base"])
        self.gguf_writer.add_rope_dimension_count(self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
        self.gguf_writer.add_head_count(self.hparams["num_attention_heads"])
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)

@ModelBase.register("Qwen2Model", "Qwen2ForCausalLM", "Qwen2AudioForConditionalGeneration")
class Qwen2Model(TextModel):
    model_arch = gguf.MODEL_ARCH.QWEN2

    def set_vocab(self):
        try:
            self._set_vocab_sentencepiece()
        except FileNotFoundError:
            self._set_vocab_gpt2()

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self._try_set_pooling_type()
        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
            self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        if self.hf_arch == "Qwen2Model":
            name = f"model.{name}"  # map to Qwen2ForCausalLM tensors
        if "language_model." in name:
            name = name.replace("language_model.", "")  # for InternVL
        if name.startswith("mlp") or name.startswith("multi_modal_projector") \
                or name.startswith("vision_model") or name.startswith("audio_tower") \
                or name.startswith("model.vision_tower") or name.startswith("model.multi_modal_projector"):
            # skip vision and audio tensors
            return []
        yield from super().modify_tensors(data_torch, name, bid)

@ModelBase.register("DreamModel")
class DreamModel(TextModel):
    model_arch = gguf.MODEL_ARCH.DREAM

    def get_vocab_base(self) -> tuple[list[str], list[int], str]:
        tokens: list[str] = []
        toktypes: list[int] = []

        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(self.dir_model, trust_remote_code=True)

        vocab_dict = tokenizer.get_vocab()
        vocab_size = self.hparams.get("vocab_size", len(vocab_dict))
        assert max(vocab_dict.values()) < vocab_size

        tokpre = self.get_vocab_base_pre(tokenizer)

        reverse_vocab = {id_: encoded_tok for encoded_tok, id_ in vocab_dict.items()}
        added_vocab = tokenizer.get_added_vocab()

        for i in range(vocab_size):
            if i not in reverse_vocab:
                tokens.append(f"[PAD{i}]")
                toktypes.append(gguf.TokenType.UNUSED)
            elif reverse_vocab[i] in added_vocab:
                tokens.append(reverse_vocab[i])
                # Check if it's a special token - treat special tokens as CONTROL tokens
                if hasattr(tokenizer, 'added_tokens_decoder') and i in tokenizer.added_tokens_decoder:
                    if tokenizer.added_tokens_decoder[i].special:
                        toktypes.append(gguf.TokenType.CONTROL)
                    else:
                        toktypes.append(gguf.TokenType.USER_DEFINED)
                else:
                    # Fallback: treat all added vocab as control tokens for special tokens like <|im_start|>
                    toktypes.append(gguf.TokenType.CONTROL)
            else:
                tokens.append(reverse_vocab[i])
                toktypes.append(gguf.TokenType.NORMAL)

        return tokens, toktypes, tokpre

    def set_vocab(self):
        try:
            self._set_vocab_sentencepiece()
        except FileNotFoundError:
            self._set_vocab_gpt2()

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self._try_set_pooling_type()

        # Dream models use non-causal attention for diffusion
        self.gguf_writer.add_causal_attention(False)

        # Handle RoPE scaling similar to Qwen2
        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
            self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"])

        # Add Dream-specific parameters
        mask_token_id = self.hparams.get("mask_token_id")
        if mask_token_id is not None:
            self.gguf_writer.add_mask_token_id(mask_token_id)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # Dream model tensors should be mapped directly since it's the base model
        yield from super().modify_tensors(data_torch, name, bid)

@ModelBase.register("LLaDAModelLM")
class LLaDAModel(TextModel):
    model_arch = gguf.MODEL_ARCH.LLADA
    undo_permute = True

    def get_vocab_base(self) -> tuple[list[str], list[int], str]:
        tokens: list[str] = []
        toktypes: list[int] = []

        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(self.dir_model, trust_remote_code=True)

        vocab_dict = tokenizer.get_vocab()
        vocab_size = self.hparams.get("vocab_size", len(vocab_dict))
        assert max(vocab_dict.values()) < vocab_size

        tokpre = self.get_vocab_base_pre(tokenizer)

        reverse_vocab = {id_: encoded_tok for encoded_tok, id_ in vocab_dict.items()}
        added_vocab = tokenizer.get_added_vocab()

        for i in range(vocab_size):
            if i not in reverse_vocab:
                tokens.append(f"[PAD{i}]")
                toktypes.append(gguf.TokenType.UNUSED)
            elif reverse_vocab[i] in added_vocab:
                tokens.append(reverse_vocab[i])
                # Check if it's a special token - treat special tokens as CONTROL tokens
                if hasattr(tokenizer, 'added_tokens_decoder') and i in tokenizer.added_tokens_decoder:
                    if tokenizer.added_tokens_decoder[i].special:
                        toktypes.append(gguf.TokenType.CONTROL)
                    else:
                        toktypes.append(gguf.TokenType.USER_DEFINED)
                else:
                    # Fallback: treat all added vocab as control tokens for special tokens like <|im_start|>
                    toktypes.append(gguf.TokenType.CONTROL)
            else:
                tokens.append(reverse_vocab[i])
                toktypes.append(gguf.TokenType.NORMAL)

        return tokens, toktypes, tokpre

    def set_vocab(self):
        self._set_vocab_gpt2()

        # LLaDA specific parameters
        self.gguf_writer.add_add_bos_token(True)

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self._try_set_pooling_type()

        # Add parameters similar to LlamaModel
        hparams = self.hparams
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])

        if (rope_dim := hparams.get("head_dim")) is None:
            n_heads = hparams.get("num_attention_heads", hparams.get("n_heads"))
            rope_dim = hparams.get("hidden_size", hparams.get("d_model")) // n_heads
        self.gguf_writer.add_rope_dimension_count(rope_dim)

        # Set context length for LLaDA
        context_length = self.hparams.get("max_sequence_length", 4096)
        self.gguf_writer.add_context_length(context_length)

        # Set embedding length (dimension size)
        embedding_length = self.hparams.get("d_model", 4096)
        self.gguf_writer.add_embedding_length(embedding_length)

        # Set feed forward length (MLP hidden size)
        feed_forward_length = self.hparams.get("mlp_hidden_size", 12288)
        self.gguf_writer.add_feed_forward_length(feed_forward_length)

        # LLaDA models use non-causal attention for diffusion, similar to Dream
        self.gguf_writer.add_causal_attention(False)

        # LLaDA models don't shift their logits
        self.gguf_writer.add_diffusion_shift_logits(False)

    @staticmethod
    def permute(weights: Tensor, n_head: int, n_head_kv: int | None):
        if n_head_kv is not None and n_head != n_head_kv:
            n_head = n_head_kv
        return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
                .swapaxes(1, 2)
                .reshape(weights.shape))

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        n_head = self.hparams.get("num_attention_heads", self.hparams.get("n_heads"))
        n_kv_head = self.hparams.get("num_key_value_heads", self.hparams.get("n_kv_heads"))

        if self.undo_permute:
            if name.endswith(("q_proj.weight", "q_proj.bias")):
                data_torch = LLaDAModel.permute(data_torch, n_head, n_head)
            if name.endswith(("k_proj.weight", "k_proj.bias")):
                data_torch = LLaDAModel.permute(data_torch, n_head, n_kv_head)

        # LLaDA model tensors should be mapped directly since it's the base model
        yield from super().modify_tensors(data_torch, name, bid)

@ModelBase.register("Ernie4_5_ForCausalLM", "Ernie4_5ForCausalLM")
class Ernie4_5Model(TextModel):
    model_arch = gguf.MODEL_ARCH.ERNIE4_5

    def set_vocab(self):
        self._set_vocab_sentencepiece()

    def set_gguf_parameters(self):
        super().set_gguf_parameters()

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        num_heads = self.hparams["num_attention_heads"]
        num_kv_heads = self.hparams["num_key_value_heads"]
        if (head_dim := self.hparams.get("head_dim")) is None:
            head_dim = self.hparams["hidden_size"] // num_heads

        if "ernie." in name:
            name = name.replace("ernie.", "model.")
        # split the qkv weights
        # qkv_proj shape: [(num_heads + 2 * num_kv_heads) * head_dim, hidden_size]
        if "qkv_proj" in name:
            name_q = name.replace("qkv_proj.weight", "q_proj.weight")
            name_k = name.replace("qkv_proj.weight", "k_proj.weight")
            name_v = name.replace("qkv_proj.weight", "v_proj.weight")
            total_q_dim = num_heads * head_dim
            total_k_dim = num_kv_heads * head_dim
            total_v_dim = num_kv_heads * head_dim
            q_proj_weight, k_proj_weight, v_proj_weight = data_torch.split([total_q_dim, total_k_dim, total_v_dim], dim=0)
            return [
                (self.map_tensor_name(name_q), q_proj_weight),
                (self.map_tensor_name(name_k), k_proj_weight),
                (self.map_tensor_name(name_v), v_proj_weight)
            ]
        # split the up_gate_proj into gate and up
        # up_gate_proj shape: [2 * intermediate_size, hidden_size]
        if "up_gate_proj" in name:
            name_up = name.replace("up_gate_proj.weight", "up_proj.weight")
            name_gate = name.replace("up_gate_proj.weight", "gate_proj.weight")
            dim_half = data_torch.shape[0] // 2
            gate_proj_weight, up_proj_weight = data_torch.split(dim_half, dim=0)
            return [
                (self.map_tensor_name(name_gate), gate_proj_weight),
                (self.map_tensor_name(name_up), up_proj_weight)
            ]
        return [(self.map_tensor_name(name), data_torch)]
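
    # Editor's note: a shape sketch for the fused-weight splits above, with
    # hypothetical dims (num_heads=16, num_kv_heads=4, head_dim=64, hidden=128):
    #   >>> qkv = torch.zeros(16 * 64 + 2 * 4 * 64, 128)
    #   >>> q, k, v = qkv.split([16 * 64, 4 * 64, 4 * 64], dim=0)
    #   >>> q.shape[0], k.shape[0], v.shape[0]
    #   (1024, 256, 256)
    # up_gate_proj is simply halved along dim 0 into gate and up.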

@ModelBase.register("Ernie4_5_MoeForCausalLM")
class Ernie4_5MoeModel(Ernie4_5Model):
    model_arch = gguf.MODEL_ARCH.ERNIE4_5_MOE
    _experts: list[dict[str, Tensor]] | None = None

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._experts = [{} for _ in range(self.block_count)]

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_expert_count(self.hparams["moe_num_experts"])
        self.gguf_writer.add_expert_used_count(self.hparams["moe_k"])
        self.gguf_writer.add_interleave_moe_layer_step(self.hparams["moe_layer_interval"])
        self.gguf_writer.add_leading_dense_block_count(self.hparams["moe_layer_start_index"])
        if (moe_intermediate_size := self.hparams.get("moe_intermediate_size")) is not None:
            self.gguf_writer.add_expert_feed_forward_length(moe_intermediate_size)
        if (shared_expert_count := self.hparams.get('moe_num_shared_experts')) is not None:
            self.gguf_writer.add_expert_shared_count(shared_expert_count)
            if shared_expert_count > 0 \
                    and (shared_expert_intermediate_size := self.hparams.get('intermediate_size')) is not None \
                    and (num_key_value_heads := self.hparams.get('num_key_value_heads')) is not None:
                self.gguf_writer.add_expert_shared_feed_forward_length(shared_expert_intermediate_size // num_key_value_heads)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # Modify correction bias name as in DeepseekV2
        if name.endswith("e_score_correction_bias"):
            name = name.replace("e_score_correction_bias", "e_score_correction.bias")

        # skip Multi-Token Prediction (MTP) layers (again, same as DeepseekV2)
        match = re.match(r"model\.mtp_block\.(\d+)", name)
        if match:
            return []

        # skip all other MTP tensors for now
        match = re.match(r"model\.mtp_emb_norm\.(\d+)", name)
        if match:
            return []

        match = re.match(r"model\.mtp_hidden_norm\.(\d+)", name)
        if match:
            return []

        match = re.match(r"model\.mtp_linear_proj\.(\d+)", name)
        if match:
            return []

        # process the experts separately
        if name.find("mlp.experts") != -1:
            n_experts = self.hparams["moe_num_experts"]
            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for w_name in ["gate_proj", "up_proj", "down_proj"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename_to_retrieve = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
                        datas.append(self._experts[bid][ename_to_retrieve])
                        del self._experts[bid][ename_to_retrieve]

                    data_torch = torch.stack(datas, dim=0)
                    merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"
                    new_name = self.map_tensor_name(merged_name)
                    tensors.append((new_name, data_torch))

                return tensors
            else:
                return []
        return [(self.map_tensor_name(name), data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")
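
    # Editor's note: the expert merge above turns n_experts separate
    # (ffn, hidden) matrices into one (n_experts, ffn, hidden) tensor per
    # projection; a minimal sketch with hypothetical sizes:
    #   >>> datas = [torch.zeros(32, 16) for _ in range(4)]  # 4 experts
    #   >>> torch.stack(datas, dim=0).shape
    #   torch.Size([4, 32, 16])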

@ModelBase.register(
    "Qwen2VLModel",
    "Qwen2VLForConditionalGeneration",
    "Qwen2_5_VLForConditionalGeneration",
    "Qwen2_5OmniModel",
)
class Qwen2VLModel(TextModel):
    model_arch = gguf.MODEL_ARCH.QWEN2VL

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        mrope_section = self.hparams["rope_scaling"]["mrope_section"]
        mrope_section += [0] * max(0, 4 - len(mrope_section))
        self.gguf_writer.add_rope_dimension_sections(mrope_section)
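
    # Editor's note: the padding above just right-pads mrope_section to four
    # entries, e.g. a config value of [16, 24, 24] (hypothetical) becomes
    # [16, 24, 24, 0].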

    def set_vocab(self):
        try:
            self._set_vocab_sentencepiece()
        except FileNotFoundError:
            self._set_vocab_gpt2()

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused
        if name.startswith("thinker."):
            name = name.replace("thinker.", "")
        if name.startswith("visual") or name.startswith("audio") or \
                name.startswith("talker") or name.startswith("token2wav"):
            # skip multimodal tensors
            return []
        return [(self.map_tensor_name(name), data_torch)]

@ModelBase.register("Qwen2VLModel", "Qwen2VLForConditionalGeneration", "Qwen2_5_VLForConditionalGeneration")
class Qwen2VLVisionModel(MmprojModel):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        assert self.hparams_vision is not None
        self.hparams_vision["image_size"] = self.hparams_vision.get("image_size", 560)
        # rename config.json values
        self.hparams_vision["num_attention_heads"] = self.hparams_vision.get("num_heads")
        self.hparams_vision["num_hidden_layers"] = self.hparams_vision.get("depth")
        if "embed_dim" in self.hparams_vision:  # qwen2vl
            self.hparams_vision["intermediate_size"] = self.hparams_vision.get("hidden_size")
            self.hparams_vision["hidden_size"] = self.hparams_vision.get("embed_dim")

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        assert self.hparams_vision is not None
        hparams = self.hparams_vision
        model_type = self.global_config['model_type']
        if model_type == 'qwen2_vl':
            self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.QWEN2VL)
        elif model_type == 'qwen2_5_vl' or model_type == 'qwen2_5_omni':
            if model_type == 'qwen2_5_omni':
                self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.QWEN25O)
            else:
                self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.QWEN25VL)
            self.gguf_writer.add_vision_use_silu(True)
            # find n_wa_pattern (window attention pattern)
            fullatt_block_indexes = hparams.get("fullatt_block_indexes")
            assert fullatt_block_indexes is not None, "fullatt_block_indexes is required for qwen2_5_vl"
            n_wa_pattern = fullatt_block_indexes[0] + 1
            # validate n_wa_pattern
            for i in range(1, len(fullatt_block_indexes)):
                if fullatt_block_indexes[i] - fullatt_block_indexes[i - 1] != n_wa_pattern:
                    raise ValueError(f"Invalid fullatt_block_indexes: {fullatt_block_indexes}")
            self.gguf_writer.add_vision_n_wa_pattern(n_wa_pattern)
        else:
            raise ValueError(f"Unknown QwenVL model type: {self.global_config['model_type']}")
        # default values below are taken from HF transformers code
        self.gguf_writer.add_vision_attention_layernorm_eps(self.global_config.get("rms_norm_eps", 1e-6))
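
    # Editor's note: on the window-attention pattern above: with a hypothetical
    # fullatt_block_indexes of [7, 15, 23, 31], every 8th block uses full
    # attention, so n_wa_pattern = 7 + 1 = 8 and the spacing check passes;
    # an irregular list such as [7, 14] would be rejected.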

    def tensor_force_quant(self, name, new_name, bid, n_dims):
        if ".position_embd." in new_name:
            return gguf.GGMLQuantizationType.F32
        return super().tensor_force_quant(name, new_name, bid, n_dims)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused
        if name.startswith("visual."):
            # process visual tensors
            # split QKV tensors if needed
            if ".qkv." in name:
                if data_torch.ndim == 2:  # weight
                    c3, _ = data_torch.shape
                else:  # bias
                    c3 = data_torch.shape[0]
                assert c3 % 3 == 0
                c = c3 // 3
                wq = data_torch[:c]
                wk = data_torch[c: c * 2]
                wv = data_torch[c * 2:]
                return [
                    (self.map_tensor_name(name.replace("qkv", "q")), wq),
                    (self.map_tensor_name(name.replace("qkv", "k")), wk),
                    (self.map_tensor_name(name.replace("qkv", "v")), wv),
                ]
            elif 'patch_embed.proj.weight' in name:
                # split Conv3D into Conv2Ds
                c1, c2, kt, kh, kw = data_torch.shape
                del c1, c2, kh, kw  # unused
                assert kt == 2, "current implementation only supports temporal_patch_size of 2"
                return [
                    (gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.V_ENC_EMBD_PATCH] + ".weight" , data_torch[:, :, 0, ...]),
                    (gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.V_ENC_EMBD_PATCH] + ".weight.1", data_torch[:, :, 1, ...]),
                ]
            else:
                return [(self.map_tensor_name(name), data_torch)]
        return []  # skip other tensors

@ModelBase.register("Qwen2_5OmniModel")
class Qwen25OmniModel(Qwen2VLVisionModel):
    has_vision_encoder = True
    has_audio_encoder = True

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        assert self.hparams_audio is not None
        self.hparams_audio["hidden_size"] = self.hparams_audio["d_model"]
        self.hparams_audio["intermediate_size"] = self.hparams_audio["encoder_ffn_dim"]
        self.hparams_audio["num_attention_heads"] = self.hparams_audio["encoder_attention_heads"]

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        assert self.hparams_audio is not None
        self.gguf_writer.add_audio_num_mel_bins(self.hparams_audio["num_mel_bins"])
        self.gguf_writer.add_audio_attention_layernorm_eps(self.hparams_audio.get("layer_norm_eps", 1e-5))

    def get_vision_config(self) -> dict[str, Any] | None:
        return self.global_config["thinker_config"].get("vision_config")

    def get_audio_config(self) -> dict[str, Any] | None:
        return self.global_config["thinker_config"].get("audio_config")

    def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
        # SinusoidsPositionEmbedding
        assert self.hparams_audio is not None
        max_timescale = 10000
        length = 1500
        channels = self.hparams_audio["hidden_size"]
        log_timescale_increment = np.log(max_timescale) / (channels // 2 - 1)
        inv_timescales = torch.exp(-log_timescale_increment * torch.arange(channels // 2).float())
        scaled_time = torch.arange(length)[:, np.newaxis] * inv_timescales[np.newaxis, :]
        pos_embd = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=1).to(dtype=torch.float32)
        yield ("audio_tower.embed_positions.weight", pos_embd)
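
    # Editor's note: shape walk-through of the sinusoidal table above, assuming
    # a hypothetical channels=1280: inv_timescales has 640 entries, scaled_time
    # is (1500, 640), and the concatenated sin/cos table is (1500, 1280), with
    # sin in the first half of the columns and cos in the second.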

    def tensor_force_quant(self, name, new_name, bid, n_dims):
        if ".conv" in name and ".weight" in name:
            return gguf.GGMLQuantizationType.F16
        return super().tensor_force_quant(name, new_name, bid, n_dims)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        if name.startswith("thinker."):
            name = name.replace("thinker.", "")

        if name.startswith("audio_tower"):
            # process audio tensors
            if "conv1.bias" in name or "conv2.bias" in name:
                # transpose conv1 and conv2 bias
                data_torch = data_torch.unsqueeze(-1)
            if "audio_bos_eos_token" in name:
                # this tensor is left unused in transformers code
                # https://github.com/huggingface/transformers/blob/6e3063422c4b1c014aa60c32b9254fd2902f0f28/src/transformers/models/qwen2_5_omni/modular_qwen2_5_omni.py#L1809
                return []
            return [(self.map_tensor_name(name), data_torch)]

        return super().modify_tensors(data_torch, name, bid)

@ModelBase.register("InternVisionModel")
class InternVisionModel(MmprojModel):
    def set_gguf_parameters(self):
        assert self.hparams_vision is not None
        if isinstance(self.hparams_vision['image_size'], list):
            self.hparams_vision['image_size'] = self.hparams_vision['image_size'][0]
        if isinstance(self.hparams_vision['patch_size'], list):
            self.hparams_vision['patch_size'] = self.hparams_vision['patch_size'][0]
        super().set_gguf_parameters()
        hparams = self.hparams
        self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.INTERNVL)
        self.gguf_writer.add_vision_attention_layernorm_eps(hparams["layer_norm_eps"])
        # hidden_act
        if hparams["hidden_act"] == "silu":
            self.gguf_writer.add_vision_use_silu(True)
        elif hparams["hidden_act"] == "gelu":
            self.gguf_writer.add_vision_use_gelu(True)
        else:
            raise ValueError(f"Unsupported hidden_act: {hparams['hidden_act']}")
        # downsample_ratio
        downsample_ratio = self.global_config.get("downsample_ratio")
        assert downsample_ratio is not None
        self.gguf_writer.add_vision_projector_scale_factor(int(1.0 / downsample_ratio))
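
    # Editor's note: downsample_ratio in InternVL configs is a fraction
    # (commonly 0.5), so the projector scale factor written above is its
    # inverse, e.g. int(1.0 / 0.5) == 2 (example value, not read from a
    # real config here).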

    def tensor_force_quant(self, name, new_name, bid, n_dims):
        if ".position_embd." in new_name:
            return gguf.GGMLQuantizationType.F32
        return super().tensor_force_quant(name, new_name, bid, n_dims)

    def _mapping_interns1_name(self, name):
        names_map = {
            "model.multi_modal_projector.layer_norm.bias": "mlp1.0.bias",
            "model.multi_modal_projector.layer_norm.weight": "mlp1.0.weight",
            "model.multi_modal_projector.linear_1.bias": "mlp1.1.bias",
            "model.multi_modal_projector.linear_1.weight": "mlp1.1.weight",
            "model.multi_modal_projector.linear_2.bias": "mlp1.3.bias",
            "model.multi_modal_projector.linear_2.weight": "mlp1.3.weight",
        }
        if name in names_map:
            name = names_map[name]
        return name

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused
        vision_prefix = ['vision_model', 'mlp', 'model.vision_tower', 'model.multi_modal_projector']
        # deal with intern-s1 special case
        name = self._mapping_interns1_name(name)
        if any([name.startswith(prefix) for prefix in vision_prefix]):
            # process visual tensors
            # correct name
            if name.startswith("vision_model"):
                name = "vision_tower." + name
            if (".ls" in name or ".lambda_" in name or "position_embedding" in name) and not name.endswith(".weight"):
                name += ".weight"
            # split QKV tensors if needed
            if ".qkv." in name:
                if data_torch.ndim == 2:  # weight
                    c3, _ = data_torch.shape
                else:  # bias
                    c3 = data_torch.shape[0]
                assert c3 % 3 == 0
                c = c3 // 3
                wq = data_torch[:c]
                wk = data_torch[c: c * 2]
                wv = data_torch[c * 2:]
                return [
                    (self.map_tensor_name(name.replace("attn.qkv", "self_attn.q_proj")), wq),
                    (self.map_tensor_name(name.replace("attn.qkv", "self_attn.k_proj")), wk),
                    (self.map_tensor_name(name.replace("attn.qkv", "self_attn.v_proj")), wv),
                ]
            return [(self.map_tensor_name(name), data_torch)]
        return []  # skip other tensors

@ModelBase.register("WavTokenizerDec")
class WavTokenizerDecModel(TextModel):
    model_arch = gguf.MODEL_ARCH.WAVTOKENIZER_DEC

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        if \
                name.endswith("codebook.cluster_size") or \
                name.endswith("codebook.embed_avg") or \
                name.endswith("codebook.inited"):
            logger.debug(f"Skipping {name!r}")
            return []

        logger.info(f"{self.map_tensor_name(name)} -> {data_torch.shape}")

        return [(self.map_tensor_name(name), data_torch)]

    def set_vocab(self):
        self._set_vocab_none()

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_vocab_size         (self.hparams["vocab_size"])
        self.gguf_writer.add_features_length    (self.hparams["n_embd_features"])
        self.gguf_writer.add_feed_forward_length(self.hparams["n_ff"])
        self.gguf_writer.add_group_norm_eps     (self.hparams["group_norm_epsilon"])
        self.gguf_writer.add_group_norm_groups  (self.hparams["group_norm_groups"])

        self.gguf_writer.add_posnet_embedding_length(self.hparams["posnet"]["n_embd"])
        self.gguf_writer.add_posnet_block_count     (self.hparams["posnet"]["n_layer"])

        self.gguf_writer.add_convnext_embedding_length(self.hparams["convnext"]["n_embd"])
        self.gguf_writer.add_convnext_block_count     (self.hparams["convnext"]["n_layer"])

        self.gguf_writer.add_causal_attention(False)

@ModelBase.register("Qwen2MoeForCausalLM")
class Qwen2MoeModel(TextModel):
    model_arch = gguf.MODEL_ARCH.QWEN2MOE

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        if (n_experts := self.hparams.get("num_experts")) is not None:
            self.gguf_writer.add_expert_count(n_experts)
        if (moe_intermediate_size := self.hparams.get("moe_intermediate_size")) is not None:
            self.gguf_writer.add_expert_feed_forward_length(moe_intermediate_size)
            logger.info(f"gguf: expert feed forward length = {moe_intermediate_size}")
        if (shared_expert_intermediate_size := self.hparams.get('shared_expert_intermediate_size')) is not None:
            self.gguf_writer.add_expert_shared_feed_forward_length(shared_expert_intermediate_size)
            logger.info(f"gguf: expert shared feed forward length = {shared_expert_intermediate_size}")
        # YaRN is not enabled by default
        # To enable it, please refer to this guide: https://huggingface.co/Qwen/Qwen3-30B-A3B#processing-long-texts
        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
            self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"])

    _experts: list[dict[str, Tensor]] | None = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        name = name.replace("language_model.", "")  # InternVL
        if name.startswith("mlp") or name.startswith("vision_model") or name.startswith("model.vision_tower") or name.startswith("model.multi_modal_projector"):
            # skip visual tensors
            return []
        # process the experts separately
        if name.find("experts") != -1:
            n_experts = self.hparams["num_experts"]
            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for w_name in ["down_proj", "gate_proj", "up_proj"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)
                    merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"
                    new_name = self.map_tensor_name(merged_name)
                    tensors.append((new_name, data_torch))

                return tensors
            else:
                return []
        return [(self.map_tensor_name(name), data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")

@ModelBase.register("Qwen3ForCausalLM")
class Qwen3Model(Qwen2Model):
    model_arch = gguf.MODEL_ARCH.QWEN3

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        hparams = ModelBase.load_hparams(self.dir_model, is_mistral_format=False)
        self.origin_hf_arch = hparams.get('architectures', [None])[0]

    def set_vocab(self):
        # deal with intern-s1-mini
        if self.origin_hf_arch == 'InternS1ForConditionalGeneration':
            self._set_vocab_interns1()
            return

        super().set_vocab()


@ModelBase.register("Qwen3MoeForCausalLM")
class Qwen3MoeModel(Qwen2MoeModel):
    model_arch = gguf.MODEL_ARCH.QWEN3MOE

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        hparams = ModelBase.load_hparams(self.dir_model, False)
        self.origin_hf_arch = hparams.get('architectures', [None])[0]

    def set_vocab(self):
        # deal with intern-s1
        if self.origin_hf_arch == 'InternS1ForConditionalGeneration':
            self._set_vocab_interns1()
            return

        super().set_vocab()

@ModelBase.register("GPT2LMHeadModel")
class GPT2Model(TextModel):
    model_arch = gguf.MODEL_ARCH.GPT2

    def set_gguf_parameters(self):
        self.gguf_writer.add_block_count(self.hparams["n_layer"])
        self.gguf_writer.add_context_length(self.hparams["n_ctx"])
        self.gguf_writer.add_embedding_length(self.hparams["n_embd"])
        self.gguf_writer.add_feed_forward_length(4 * self.hparams["n_embd"])
        self.gguf_writer.add_head_count(self.hparams["n_head"])
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        tensors: list[tuple[str, Tensor]] = []

        # we don't need these
        if name.endswith((".attn.bias", ".attn.masked_bias")):
            return tensors

        if name.endswith((".c_attn.weight", ".c_proj.weight", ".c_fc.weight")):
            data_torch = data_torch.transpose(1, 0)

        new_name = self.map_tensor_name(name)

        tensors.append((new_name, data_torch))

        return tensors
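
    # Editor's note: the transpose above reflects that GPT-2 checkpoints store
    # these layers as HF Conv1D modules with weights laid out
    # (in_features, out_features), while GGUF expects the Linear-style
    # (out_features, in_features); a sketch with hypothetical dims:
    #   >>> w = torch.zeros(768, 2304)  # c_attn as stored by HF
    #   >>> w.transpose(1, 0).shape
    #   torch.Size([2304, 768])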

@ModelBase.register("PhiForCausalLM")
class Phi2Model(TextModel):
    model_arch = gguf.MODEL_ARCH.PHI2

    def set_gguf_parameters(self):
        block_count = self.find_hparam(["num_hidden_layers", "n_layer"])

        rot_pct = self.find_hparam(["partial_rotary_factor"])
        n_embd = self.find_hparam(["hidden_size", "n_embd"])
        n_head = self.find_hparam(["num_attention_heads", "n_head"])

        self.gguf_writer.add_context_length(self.find_hparam(["n_positions", "max_position_embeddings"]))

        self.gguf_writer.add_embedding_length(n_embd)
        self.gguf_writer.add_feed_forward_length(4 * n_embd)
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_head_count(n_head)
        self.gguf_writer.add_head_count_kv(n_head)
        self.gguf_writer.add_layer_norm_eps(self.find_hparam(["layer_norm_epsilon", "layer_norm_eps"]))
        self.gguf_writer.add_rope_dimension_count(int(rot_pct * n_embd) // n_head)
        self.gguf_writer.add_file_type(self.ftype)
        self.gguf_writer.add_add_bos_token(False)
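
    # Editor's note: worked example for the rope dimension count above, using
    # the published phi-2 sizes (n_embd=2560, n_head=32,
    # partial_rotary_factor=0.4): int(0.4 * 2560) // 32 == 32, i.e. 32 of the
    # 80 dims per head are rotated.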

@ModelBase.register("Phi3ForCausalLM")
class Phi3MiniModel(TextModel):
    model_arch = gguf.MODEL_ARCH.PHI3

    def set_vocab(self):
        # Phi-4 model uses GPT2Tokenizer
        tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
        if tokenizer_config_file.is_file():
            with open(tokenizer_config_file, "r", encoding="utf-8") as f:
                tokenizer_config_json = json.load(f)
                tokenizer_class = tokenizer_config_json['tokenizer_class']
                if tokenizer_class == 'GPT2Tokenizer':
                    return self._set_vocab_gpt2()

        from sentencepiece import SentencePieceProcessor

        tokenizer_path = self.dir_model / 'tokenizer.model'

        if not tokenizer_path.is_file():
            raise ValueError(f'Error: Missing {tokenizer_path}')

        tokenizer = SentencePieceProcessor()
        tokenizer.LoadFromFile(str(tokenizer_path))

        vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())

        tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
        scores: list[float] = [-10000.0] * vocab_size
        toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size

        for token_id in range(tokenizer.vocab_size()):
            piece = tokenizer.IdToPiece(token_id)
            text = piece.encode("utf-8")
            score = tokenizer.GetScore(token_id)

            toktype = SentencePieceTokenTypes.NORMAL
            if tokenizer.IsUnknown(token_id):
                toktype = SentencePieceTokenTypes.UNKNOWN
            elif tokenizer.IsControl(token_id):
                toktype = SentencePieceTokenTypes.CONTROL
            elif tokenizer.IsUnused(token_id):
                toktype = SentencePieceTokenTypes.UNUSED
            elif tokenizer.IsByte(token_id):
                toktype = SentencePieceTokenTypes.BYTE

            tokens[token_id] = text
            scores[token_id] = score
            toktypes[token_id] = toktype

        added_tokens_file = self.dir_model / 'added_tokens.json'
        if added_tokens_file.is_file():
            with open(added_tokens_file, "r", encoding="utf-8") as f:
                added_tokens_json = json.load(f)

                for key in added_tokens_json:
                    token_id = added_tokens_json[key]
                    if token_id >= vocab_size:
                        logger.debug(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
                        continue

                    tokens[token_id] = key.encode("utf-8")
                    scores[token_id] = -1000.0
                    toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED

        tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
        if tokenizer_config_file.is_file():
            with open(tokenizer_config_file, "r", encoding="utf-8") as f:
                tokenizer_config_json = json.load(f)
                added_tokens_decoder = tokenizer_config_json.get("added_tokens_decoder", {})
                for token_id, foken_data in added_tokens_decoder.items():
                    token_id = int(token_id)
                    token = foken_data["content"].encode("utf-8")
                    if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
                        if tokens[token_id] != token:
                            logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token.decode("utf-8")!r}')
                    tokens[token_id] = token
                    scores[token_id] = -1000.0
                    toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
                    if foken_data.get("special"):
                        toktypes[token_id] = SentencePieceTokenTypes.CONTROL

        tokenizer_file = self.dir_model / 'tokenizer.json'
        if tokenizer_file.is_file():
            with open(tokenizer_file, "r", encoding="utf-8") as f:
                tokenizer_json = json.load(f)
                added_tokens = tokenizer_json.get("added_tokens", [])
                for foken_data in added_tokens:
                    token_id = int(foken_data["id"])
                    token = foken_data["content"].encode("utf-8")
                    if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
                        if tokens[token_id] != token:
                            logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token.decode("utf-8")!r}')
                    tokens[token_id] = token
                    scores[token_id] = -1000.0
                    toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
                    if foken_data.get("special"):
                        toktypes[token_id] = SentencePieceTokenTypes.CONTROL

        self.gguf_writer.add_tokenizer_model("llama")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        block_count = self.find_hparam(["num_hidden_layers", "n_layer"])

        n_embd = self.find_hparam(["hidden_size", "n_embd"])
        n_head = self.find_hparam(["num_attention_heads", "n_head"])
        n_head_kv = self.find_hparam(["num_key_value_heads", "n_head_kv"])
        rms_eps = self.find_hparam(["rms_norm_eps"])
        max_pos_embds = self.find_hparam(["n_positions", "max_position_embeddings"])
        orig_max_pos_embds = self.find_hparam(["original_max_position_embeddings"])
        rot_pct = self.hparams.get("partial_rotary_factor", 1.0)
        rope_dims = int(rot_pct * n_embd) // n_head

        self.gguf_writer.add_context_length(max_pos_embds)
        self.gguf_writer.add_rope_scaling_orig_ctx_len(orig_max_pos_embds)
        self.gguf_writer.add_embedding_length(n_embd)
        self.gguf_writer.add_feed_forward_length(self.find_hparam(["intermediate_size"]))
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_head_count(n_head)
        self.gguf_writer.add_head_count_kv(n_head_kv)
        self.gguf_writer.add_layer_norm_rms_eps(rms_eps)
        self.gguf_writer.add_rope_dimension_count(rope_dims)
        self.gguf_writer.add_rope_freq_base(self.find_hparam(["rope_theta"]))
        self.gguf_writer.add_file_type(self.ftype)
        sliding_window = self.hparams.get("sliding_window")
        # use zero value of sliding_window to distinguish Phi-4 from other PHI3 models
        if sliding_window is None:
            sliding_window = 0
        self.gguf_writer.add_sliding_window(sliding_window)

    def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
        n_embd = self.find_hparam(["hidden_size", "n_embd"])
        n_head = self.find_hparam(["num_attention_heads", "n_head"])
        max_pos_embds = self.find_hparam(["n_positions", "max_position_embeddings"])
        orig_max_pos_embds = self.find_hparam(["original_max_position_embeddings"])
        rot_pct = self.hparams.get("partial_rotary_factor", 1.0)
        rope_dims = int(rot_pct * n_embd) // n_head

        # write rope scaling for long context (128k) model
        rope_scaling = self.find_hparam(['rope_scaling'], True)
        if rope_scaling is None:
            return

        scale = max_pos_embds / orig_max_pos_embds

        rope_scaling_type = rope_scaling.get('rope_type', rope_scaling.get('type', '')).lower()
        if len(rope_scaling_type) == 0:
            raise KeyError('Missing the required key rope_scaling.type')

        if rope_scaling_type == 'su' or rope_scaling_type == 'longrope':
            attn_factor = math.sqrt(1 + math.log(scale) / math.log(orig_max_pos_embds)) if scale > 1.0 else 1.0
        elif rope_scaling_type == 'yarn':
            attn_factor = 0.1 * math.log(scale) + 1.0 if scale > 1.0 else 1.0
        else:
            raise NotImplementedError(f'The rope scaling type {rope_scaling_type} is not supported yet')

        self.gguf_writer.add_rope_scaling_attn_factors(attn_factor)

        long_factors = rope_scaling.get('long_factor', None)
        short_factors = rope_scaling.get('short_factor', None)

        if long_factors is None or short_factors is None:
            raise KeyError('Missing the required key rope_scaling.long_factor or rope_scaling.short_factor')

        if len(long_factors) != len(short_factors) or len(long_factors) != rope_dims / 2:
            raise ValueError(f'The length of rope long and short factors must be {rope_dims / 2}. long_factors = {len(long_factors)}, short_factors = {len(short_factors)}.')

        yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FACTORS_LONG), torch.tensor(long_factors, dtype=torch.float32))
        yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FACTORS_SHORT), torch.tensor(short_factors, dtype=torch.float32))
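
    # Editor's note: on the length check above: the long/short factors scale
    # rotary frequencies, and there is one frequency per *pair* of rotary
    # dims, hence rope_dims / 2 entries. E.g. with hypothetical n_embd=3072,
    # n_head=32, rot_pct=1.0: rope_dims = 96, so both lists need 48 entries.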

@ModelBase.register("PhiMoEForCausalLM")
class PhiMoeModel(Phi3MiniModel):
    model_arch = gguf.MODEL_ARCH.PHIMOE

    _experts: list[dict[str, Tensor]] | None = None

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_expert_used_count(self.hparams["num_experts_per_tok"])
        self.gguf_writer.add_expert_count(self.hparams["num_local_experts"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # process the experts separately
        if name.find("block_sparse_moe.experts") != -1:
            n_experts = self.hparams["num_local_experts"]
            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for w_name in ["w1", "w2", "w3"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.{w_name}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)
                    merged_name = f"model.layers.{bid}.block_sparse_moe.experts.{w_name}.weight"
                    new_name = self.map_tensor_name(merged_name)
                    tensors.append((new_name, data_torch))

                return tensors
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")

@ModelBase.register("PlamoForCausalLM")
class PlamoModel(TextModel):
    model_arch = gguf.MODEL_ARCH.PLAMO

    def set_vocab(self):
        self._set_vocab_sentencepiece()

    def set_gguf_parameters(self):
        hparams = self.hparams
        block_count = hparams["num_hidden_layers"]

        self.gguf_writer.add_context_length(4096)  # not in config.json
        self.gguf_writer.add_embedding_length(hparams["hidden_size"])
        self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_head_count(hparams["num_attention_heads"])
        self.gguf_writer.add_head_count_kv(5)  # hparams["num_key_value_heads"] is wrong
        self.gguf_writer.add_layer_norm_rms_eps(hparams["rms_norm_eps"])
        self.gguf_writer.add_file_type(self.ftype)

    def shuffle_attn_q_weight(self, data_torch):
        assert data_torch.size() == (5120, 5120)
        data_torch = data_torch.reshape(8, 5, 128, 5120)
        data_torch = torch.permute(data_torch, (1, 0, 2, 3))
        data_torch = torch.reshape(data_torch, (5120, 5120))
        return data_torch

    def shuffle_attn_output_weight(self, data_torch):
        assert data_torch.size() == (5120, 5120)
        data_torch = data_torch.reshape(5120, 8, 5, 128)
        data_torch = torch.permute(data_torch, (0, 2, 1, 3))
        data_torch = torch.reshape(data_torch, (5120, 5120))
        return data_torch
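
    # Editor's note: a shape walk-through of the shuffles above (PLaMo-13B
    # sizes are hard-coded in the asserts): attn_q is viewed as
    # (8, 5, 128, 5120), plausibly 8 query heads per kv group times 5 kv
    # groups, and the two leading axes are swapped so heads sharing a kv head
    # become contiguous, which is what lets ggml_mul_mat broadcast k/v in GQA:
    #   >>> w = torch.zeros(5120, 5120)
    #   >>> w.reshape(8, 5, 128, 5120).permute(1, 0, 2, 3).reshape(5120, 5120).shape
    #   torch.Size([5120, 5120])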

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        new_name = self.map_tensor_name(name)

        # shuffle for broadcasting of gqa in ggml_mul_mat
        if new_name.endswith("attn_q.weight"):
            data_torch = self.shuffle_attn_q_weight(data_torch)
        elif new_name.endswith("attn_output.weight"):
            data_torch = self.shuffle_attn_output_weight(data_torch)

        return [(new_name, data_torch)]

@ModelBase.register("Plamo2ForCausalLM", "PLaMo2ForCausalLM")
class Plamo2Model(TextModel):
    model_arch = gguf.MODEL_ARCH.PLAMO2

    def set_vocab(self):
        # PLaMo 2 uses a custom tokenizer with a .jsonl file
        # We need to handle this specially
        tokenizer_jsonl_path = self.dir_model / "tokenizer.jsonl"
        tokenizer_config_path = self.dir_model / "tokenizer_config.json"

        if not tokenizer_jsonl_path.is_file():
            raise FileNotFoundError(f"PLaMo 2 tokenizer file not found: {tokenizer_jsonl_path}")

        # Load tokenizer config
        with open(tokenizer_config_path, 'r', encoding='utf-8') as f:
            tokenizer_config = json.load(f)

        # Load tokens from JSONL file (actually a list format)
        tokens = []
        scores = []
        toktypes = []

        with open(tokenizer_jsonl_path, 'r', encoding='utf-8') as f:
            for line_num, line in enumerate(f):
                if line.strip():
                    token_data = json.loads(line)
                    # Format: [token, score, type, ?, ?, ?, ?]
                    token = token_data[0].encode("utf-8")
                    score = float(token_data[1])
                    token_type_str = token_data[2] if len(token_data) > 2 else "NORMAL"

                    tokens.append(token)
                    scores.append(score)

                    # Map token type strings to GGUF token types
                    if token_type_str == "UNKNOWN":
                        toktypes.append(gguf.TokenType.UNKNOWN)
                    elif token_type_str == "CONTROL":
                        toktypes.append(gguf.TokenType.CONTROL)
                    elif token_type_str == "BYTE":
                        toktypes.append(gguf.TokenType.BYTE)
                    else:
                        # Check for PLaMo-2 special tokens
                        token_str = token_data[0]
                        if token_str.startswith("<|plamo:") and token_str.endswith("|>"):
                            toktypes.append(gguf.TokenType.CONTROL)
                        else:
                            toktypes.append(gguf.TokenType.NORMAL)

        vocab_size = self.hparams["vocab_size"]
        if vocab_size > len(tokens):
            pad_count = vocab_size - len(tokens)
            logger.debug(f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]")
            for i in range(1, pad_count + 1):
                tokens.append(bytes(f"[PAD{i}]", encoding="utf-8"))
                scores.append(-1000.0)
                toktypes.append(gguf.TokenType.UNUSED)

        # Use "plamo2" tokenizer type for PLaMo-2's custom Aho-Corasick tokenizer
        self.gguf_writer.add_tokenizer_model("plamo2")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)

        # Add special tokens from config
        if "bos_token" in tokenizer_config and tokenizer_config["bos_token"] is not None:
            token_id = tokens.index(tokenizer_config["bos_token"].encode("utf-8"))
            self.gguf_writer.add_bos_token_id(token_id)
        if "eos_token" in tokenizer_config and tokenizer_config["eos_token"] is not None:
            token_id = tokens.index(tokenizer_config["eos_token"].encode("utf-8"))
            self.gguf_writer.add_eos_token_id(token_id)
        if "pad_token" in tokenizer_config and tokenizer_config["pad_token"] is not None:
            token_id = tokens.index(tokenizer_config["pad_token"].encode("utf-8"))
            self.gguf_writer.add_pad_token_id(token_id)
        if "sep_token" in tokenizer_config and tokenizer_config["sep_token"] is not None:
            token_id = tokens.index(tokenizer_config["sep_token"].encode("utf-8"))
            self.gguf_writer.add_sep_token_id(token_id)
        if "unk_token" in tokenizer_config and tokenizer_config["unk_token"] is not None:
            token_id = tokens.index(tokenizer_config["unk_token"].encode("utf-8"))
            self.gguf_writer.add_unk_token_id(token_id)

        # Add <|plamo:op|> as EOT to ensure appropriate end of generation
        self.gguf_writer.add_eot_token_id(4)

        self.gguf_writer.add_add_space_prefix(False)

    def set_gguf_parameters(self):
        hparams = self.hparams
        block_count = hparams["num_hidden_layers"]
        self.gguf_writer.add_vocab_size(self.hparams["vocab_size"])

        # Which layers are Mamba layers
        # PLaMo 2 uses mamba_step to indicate the pattern (e.g., 2 means every other layer)
        # This logic matches modeling_plamo.py's is_mamba function
        mamba_step = hparams.get("mamba_step", 2)
        mamba_enabled = hparams.get("mamba_enabled", True)
        mamba_layers = []

        if mamba_enabled:
            for i in range(block_count):
                if block_count <= (mamba_step // 2):
                    # use attention in last layer
                    is_mamba = (i != block_count - 1)
                else:
                    is_mamba = (i % mamba_step) != (mamba_step // 2)
                if is_mamba:
                    mamba_layers.append(0)
                else:
                    mamba_layers.append(hparams.get("num_key_value_heads", 4))

        if mamba_layers:
            self.gguf_writer.add_head_count_kv(mamba_layers)

        self.gguf_writer.add_context_length(hparams.get("max_position_embeddings", 2048))
        self.gguf_writer.add_embedding_length(hparams.get("hidden_size", 4096))
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_head_count(hparams.get("num_attention_heads", 32))
        self.gguf_writer.add_layer_norm_rms_eps(hparams.get("rms_norm_eps", 1e-06))
        self.gguf_writer.add_rope_freq_base(hparams.get("rope_theta", 10000))

        # Mamba parameters
        self.gguf_writer.add_ssm_state_size(hparams.get("mamba_d_state", 64))
        self.gguf_writer.add_ssm_conv_kernel(hparams.get("mamba_d_conv", 4))
        self.gguf_writer.add_ssm_time_step_rank(hparams.get("mamba_num_heads", 64))
        intermediate_size = hparams.get("mamba_num_heads", 64) * hparams.get("hidden_size_per_head", 128)
        self.gguf_writer.add_ssm_inner_size(intermediate_size)
        self.gguf_writer.add_ssm_group_count(0)

        # MLP feed forward parameters (for attention layers)
        self.gguf_writer.add_feed_forward_length(hparams.get("intermediate_size", 13312))
        self.gguf_writer.add_file_type(self.ftype)
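
    # Editor's note: the mamba_step pattern above, traced with hypothetical
    # block_count=8 and mamba_step=2: layer i is Mamba unless i % 2 == 1, so
    # even layers get head_count_kv 0 (Mamba) and odd layers keep the
    # attention kv head count, i.e. the two layer types alternate.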

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        if name.endswith(".A_log"):
            data_torch = -torch.exp(data_torch)
        elif name.endswith(".dt_bias"):
            name = name.rpartition(".dt_bias")[0] + ".dt_proj.bias"
        elif name.endswith(".dt_norm_weight"):
            name = name.rpartition(".dt_norm_weight")[0] + ".dt_norm.weight"
        elif name.endswith(".B_norm_weight"):
            name = name.rpartition(".B_norm_weight")[0] + ".B_norm.weight"
        elif name.endswith(".C_norm_weight"):
            name = name.rpartition(".C_norm_weight")[0] + ".C_norm.weight"
        elif name.endswith(".k_weight"):
            name = name.rpartition(".k_weight")[0] + ".k.weight"
        elif name.endswith(".q_weight"):
            name = name.rpartition(".q_weight")[0] + ".q.weight"
        elif name.endswith(".conv1d.weight"):
            data_torch = torch.squeeze(data_torch)  # remove (, 1, )
            assert data_torch.ndim == 2
        elif name.endswith(".pre_mixer_norm.weight"):
            data_torch += 1.0
        elif name.endswith(".post_mixer_norm.weight"):
            data_torch += 1.0 / 5
        elif name.endswith(".pre_mlp_norm.weight"):
            data_torch += 1.0
        elif name.endswith(".post_mlp_norm.weight"):
            data_torch += 1.0 / (5**1.5)
        elif name.endswith(".norm.weight"):
            data_torch += 1.0

        new_name = self.map_tensor_name(name)

        return [(new_name, data_torch)]


@ModelBase.register("CodeShellForCausalLM")
class CodeShellModel(TextModel):
    model_arch = gguf.MODEL_ARCH.CODESHELL

    def set_gguf_parameters(self):
        block_count = self.hparams["n_layer"]

        self.gguf_writer.add_context_length(self.hparams["n_positions"])
        self.gguf_writer.add_embedding_length(self.hparams["n_embd"])
        self.gguf_writer.add_feed_forward_length(4 * self.hparams["n_embd"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_head_count(self.hparams["n_head"])
        self.gguf_writer.add_head_count_kv(self.hparams["num_query_groups"])
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)
        self.gguf_writer.add_rope_freq_base(10000.0)
        self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
        self.gguf_writer.add_rope_scaling_factor(1.0)

    _has_tok_embd = False

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        output_name = self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT)
        tok_embd_name = self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD)

        new_name = self.map_tensor_name(name)

        # assuming token_embd.weight is seen before output.weight
        if not self._has_tok_embd and new_name == self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT):
            # even though the tensor file(s) do not contain the word embeddings, they are still listed in the weight map
            if self.tensor_names and "transformer.wte.weight" in self.tensor_names:
                logger.debug(f"{tok_embd_name} not found before {output_name}, assuming they are tied")
                self.tensor_names.remove("transformer.wte.weight")
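                # note: removing the name here should keep the final
                # missing-tensor check satisfied, since the embeddings are tied
                # to output.weight and never stored separately on disk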
        elif new_name == tok_embd_name:
            self._has_tok_embd = True

        return [(new_name, data_torch)]


@ModelBase.register("InternLM2ForCausalLM")
class InternLM2Model(TextModel):
    model_arch = gguf.MODEL_ARCH.INTERNLM2

    def set_vocab(self):
        # (TODO): Is there a better way?
        # Copied from _set_vocab_sentencepiece; the only difference is that we treat the character
        # \x00 specially and convert it into an emoji, to prevent it from being mistakenly
        # recognized as an empty string in C++.
        from sentencepiece import SentencePieceProcessor
        from sentencepiece import sentencepiece_model_pb2 as model

        tokenizer_path = self.dir_model / 'tokenizer.model'

        tokens: list[bytes] = []
        scores: list[float] = []
        toktypes: list[int] = []

        if not tokenizer_path.is_file():
            logger.error(f'Error: Missing {tokenizer_path}')
            sys.exit(1)

        sentencepiece_model = model.ModelProto()  # pyright: ignore[reportAttributeAccessIssue]
        sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read())
        add_prefix = sentencepiece_model.normalizer_spec.add_dummy_prefix

        tokenizer = SentencePieceProcessor()
        tokenizer.LoadFromFile(str(tokenizer_path))

        vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())

        for token_id in range(vocab_size):
            piece = tokenizer.IdToPiece(token_id)
            text = piece.encode("utf-8")
            score = tokenizer.GetScore(token_id)
            if text == b"\x00":
                # (TODO): fixme
                # Hack: replace the \x00 character.
                logger.warning(f"InternLM2: converting token '{text}' to '🐉'!")
                text = "🐉".encode("utf-8")

            toktype = SentencePieceTokenTypes.NORMAL
            if tokenizer.IsUnknown(token_id):
                toktype = SentencePieceTokenTypes.UNKNOWN
            elif tokenizer.IsControl(token_id):
                toktype = SentencePieceTokenTypes.CONTROL
            elif tokenizer.IsUnused(token_id):
                toktype = SentencePieceTokenTypes.UNUSED
            elif tokenizer.IsByte(token_id):
                toktype = SentencePieceTokenTypes.BYTE
            # take care of unused raw tokens
            if piece.startswith('[UNUSED'):
                toktype = SentencePieceTokenTypes.UNUSED

            tokens.append(text)
            scores.append(score)
            toktypes.append(toktype)

        added_tokens_file = self.dir_model / 'added_tokens.json'
        if added_tokens_file.is_file():
            with open(added_tokens_file, "r", encoding="utf-8") as f:
                added_tokens_json = json.load(f)

            for key in added_tokens_json:
                tokens.append(key.encode("utf-8"))
                scores.append(-1000.0)
                toktypes.append(SentencePieceTokenTypes.USER_DEFINED)

        chat_eos_token = '<|im_end|>'
        chat_eos_token_id = None

        tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
        if tokenizer_config_file.is_file():
            with open(tokenizer_config_file, "r", encoding="utf-8") as f:
                tokenizer_config_json = json.load(f)
            added_tokens_decoder = tokenizer_config_json.get("added_tokens_decoder", {})
            for token_id, token_data in added_tokens_decoder.items():
                token_id = int(token_id)
                token = token_data["content"]
                if token == chat_eos_token:
                    chat_eos_token_id = token_id
                token = token.encode("utf-8")
                if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
                    if tokens[token_id] != token:
                        logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token.decode("utf-8")!r}')
                tokens[token_id] = token
                scores[token_id] = -1000.0
                toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
                if token_data.get("special"):
                    toktypes[token_id] = SentencePieceTokenTypes.CONTROL

        tokenizer_file = self.dir_model / 'tokenizer.json'
        if tokenizer_file.is_file():
            with open(tokenizer_file, "r", encoding="utf-8") as f:
                tokenizer_json = json.load(f)
            added_tokens = tokenizer_json.get("added_tokens", [])
            for token_data in added_tokens:
                token_id = int(token_data["id"])
                token = token_data["content"]
                if token == chat_eos_token:
                    chat_eos_token_id = token_id
                token = token.encode("utf-8")
                if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
                    if tokens[token_id] != token:
                        logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token.decode("utf-8")!r}')
                tokens[token_id] = token
                scores[token_id] = -1000.0
                toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
                if token_data.get("special"):
                    toktypes[token_id] = SentencePieceTokenTypes.CONTROL

        self.gguf_writer.add_tokenizer_model("llama")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)
        self.gguf_writer.add_add_space_prefix(add_prefix)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        old_eos = special_vocab.special_token_ids["eos"]
        if chat_eos_token_id is not None:
            # For the chat model, we replace the eos with '<|im_end|>'.
            # TODO: this is a hack, should be fixed
            #       https://github.com/ggml-org/llama.cpp/pull/6745#issuecomment-2067687048
            special_vocab.special_token_ids["eos"] = chat_eos_token_id
            logger.warning(f"Replace eos:{old_eos} with a special token:{chat_eos_token_id}"
                           " in chat mode so that the conversation can end normally.")

        special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"])
        self.gguf_writer.add_block_count(self.hparams["num_hidden_layers"])
        self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
        self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
        self.gguf_writer.add_rope_freq_base(self.hparams["rope_theta"])
        self.gguf_writer.add_head_count(self.hparams["num_attention_heads"])
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
        self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"])
        self.gguf_writer.add_file_type(self.ftype)

        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "linear" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        num_heads = self.hparams["num_attention_heads"]
        num_kv_heads = self.hparams["num_key_value_heads"]
        n_embd = self.hparams["hidden_size"]
        q_per_kv = num_heads // num_kv_heads
        head_dim = n_embd // num_heads
        num_groups = num_heads // q_per_kv

        name = name.replace("language_model.", "")  # InternVL
        if name.startswith("mlp") or name.startswith("vision_model"):
            # skip visual tensors
            return []
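
        # note: InternLM2 packs attention as a single wqkv tensor laid out per
        # KV group: each group holds q_per_kv query heads followed by one K and
        # one V head. e.g. (illustrative) 32 Q heads with 8 KV heads gives
        # q_per_kv = 4 and a (8, 4 + 2, head_dim, n_embd) view for the split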
        if bid is not None and f"model.layers.{bid}.attention.wqkv" in name:
            qkv = data_torch
            qkv = qkv.reshape((num_groups, q_per_kv + 2, head_dim, n_embd))
            q, k, v = qkv[:, : q_per_kv], qkv[:, -2], qkv[:, -1]

            # the model weights of q and k require an additional reshape
            q = LlamaModel.permute(q.reshape((-1, q.shape[-1])), num_heads, num_heads)
            k = LlamaModel.permute(k.reshape((-1, k.shape[-1])), num_heads, num_kv_heads)
            v = v.reshape((-1, v.shape[-1]))

            return [
                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid), q),
                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid), k),
                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid), v),
            ]
        else:
            return [(self.map_tensor_name(name), data_torch)]


@ModelBase.register("InternLM3ForCausalLM")
class InternLM3Model(TextModel):
    model_arch = gguf.MODEL_ARCH.LLAMA

    def set_vocab(self):
        tokens, scores, toktypes = self._create_vocab_sentencepiece()
        self.gguf_writer.add_tokenizer_model("llama")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))

        tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
        if tokenizer_config_file.is_file():
            with open(tokenizer_config_file, "r", encoding="utf-8") as f:
                tokenizer_config_json = json.load(f)
            if "add_prefix_space" in tokenizer_config_json:
                self.gguf_writer.add_add_space_prefix(tokenizer_config_json["add_prefix_space"])

            if "added_tokens_decoder" in tokenizer_config_json:
                for token_id, token_data in tokenizer_config_json["added_tokens_decoder"].items():
                    if token_data.get("special"):
                        token_id = int(token_id)
                        token = token_data["content"]
                        special_vocab._set_special_token(token, token_id)
                        # update eos token
                        if token == '<|im_end|>' and "eos" in special_vocab.special_token_ids:
                            special_vocab.special_token_ids["eos"] = token_id

        special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        hparams = self.hparams
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])

        if (rope_dim := hparams.get("head_dim")) is None:
            rope_dim = hparams["hidden_size"] // hparams["num_attention_heads"]
        self.gguf_writer.add_rope_dimension_count(rope_dim)

        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "linear" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        n_head = self.hparams["num_attention_heads"]
        n_kv_head = self.hparams.get("num_key_value_heads")
        name = name.replace("language_model.", "")  # InternVL
        if name.startswith("mlp") or name.startswith("vision_model"):
            # skip visual tensors
            return []
        if name.endswith(("q_proj.weight", "q_proj.bias")):
            data_torch = LlamaModel.permute(data_torch, n_head, n_head)
        if name.endswith(("k_proj.weight", "k_proj.bias")):
            data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)
        return [(self.map_tensor_name(name), data_torch)]


@ModelBase.register("BertModel", "BertForMaskedLM", "CamembertModel", "BertForSequenceClassification")
class BertModel(TextModel):
    model_arch = gguf.MODEL_ARCH.BERT

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.vocab_size = None

        if cls_out_labels := self.hparams.get("id2label"):
            if len(cls_out_labels) == 2 and cls_out_labels[0] == "LABEL_0":
                # Remove dummy labels added by AutoConfig
                cls_out_labels = None
        self.cls_out_labels = cls_out_labels

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_causal_attention(False)
        self._try_set_pooling_type()

        if self.cls_out_labels:
            self.gguf_writer.add_classifier_output_labels([v for k, v in sorted(self.cls_out_labels.items())])

    def set_vocab(self):
        tokens, toktypes, tokpre = self.get_vocab_base()
        self.vocab_size = len(tokens)

        # we need this to validate the size of the token_type embeddings
        # though currently we are passing all zeros to the token_type embeddings
        # "Sequence A" or "Sequence B"
        self.gguf_writer.add_token_type_count(self.hparams.get("type_vocab_size", 1))
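
        # note: the mapping below assumes a WordPiece vocab (true for stock BERT):
        # e.g. (illustrative) ["[CLS]", "un", "##armed"] becomes
        # ["[CLS]", "\u2581un", "armed"], i.e. continuation markers are dropped
        # and word-initial pieces get the SentencePiece-style "phantom space"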
        # convert to phantom space vocab
        def phantom(tok):
            if tok.startswith("[") and tok.endswith("]"):
                return tok
            if tok.startswith("##"):
                return tok[2:]
            return "\u2581" + tok
        tokens = list(map(phantom, tokens))

        # add vocab to gguf
        self.gguf_writer.add_tokenizer_model("bert")
        self.gguf_writer.add_tokenizer_pre(tokpre)
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)

        # handle special tokens
        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        if name.startswith("bert."):
            name = name[5:]

        if name.endswith(".gamma"):
            name = name[:-6] + ".weight"

        if name.endswith(".beta"):
            name = name[:-5] + ".bias"

        # we are only using BERT for embeddings so we don't need the pooling layer
        if name in ("embeddings.position_ids", "pooler.dense.weight", "pooler.dense.bias"):
            return []  # we don't need these

        if name.startswith("cls.predictions"):
            return []

        if name.startswith("cls.seq_relationship"):
            return []

        if self.cls_out_labels:
            # For BertForSequenceClassification (direct projection layer)
            if name == "classifier.weight":
                name = "classifier.out_proj.weight"

            if name == "classifier.bias":
                name = "classifier.out_proj.bias"

        return [(self.map_tensor_name(name), data_torch)]
    def _xlmroberta_tokenizer_init(self) -> None:
        # we need the pad_token_id to know how to chop down position_embd matrix
        if (pad_token_id := self.hparams.get("pad_token_id")) is not None:
            self._position_offset = 1 + pad_token_id
            if "max_position_embeddings" in self.hparams:
                self.hparams["max_position_embeddings"] -= self._position_offset
        else:
            self._position_offset = None

    def _xlmroberta_set_vocab(self) -> None:
        # to avoid TypeError: Descriptors cannot be created directly
        # exception when importing sentencepiece_model_pb2
        os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
        from sentencepiece import SentencePieceProcessor
        from sentencepiece import sentencepiece_model_pb2 as model

        tokenizer_path = self.dir_model / 'sentencepiece.bpe.model'

        tokenizer_json = {}
        tokenizer_config_json = {}
        if not tokenizer_path.is_file():
            tokenizer_path = self.dir_model / 'tokenizer.json'
            tokenizer_config_path = self.dir_model / 'tokenizer_config.json'

            if not tokenizer_path.is_file():
                raise FileNotFoundError(f"File not found: {tokenizer_path}")

            from base64 import b64decode
            from transformers import AutoTokenizer
            tokenizer = AutoTokenizer.from_pretrained(self.dir_model)

            with open(tokenizer_path, "r", encoding="utf-8") as fp:
                tokenizer_json = json.load(fp)

            if tokenizer_config_path.is_file():
                with open(tokenizer_config_path, "r", encoding="utf-8") as fp:
                    tokenizer_config_json = json.load(fp)

            add_prefix = tokenizer.add_prefix_space
            remove_whitespaces = tokenizer.clean_up_tokenization_spaces
            precompiled_charsmap = b64decode(tokenizer_json["normalizer"]["precompiled_charsmap"])

            vocab_size = max(self.hparams.get("vocab_size", 0), tokenizer.vocab_size)
        else:
            sentencepiece_model = model.ModelProto()  # pyright: ignore[reportAttributeAccessIssue]
            sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read())
            assert sentencepiece_model.trainer_spec.model_type == 1  # UNIGRAM

            add_prefix = sentencepiece_model.normalizer_spec.add_dummy_prefix
            remove_whitespaces = sentencepiece_model.normalizer_spec.remove_extra_whitespaces
            precompiled_charsmap = sentencepiece_model.normalizer_spec.precompiled_charsmap

            tokenizer = SentencePieceProcessor()
            tokenizer.LoadFromFile(str(tokenizer_path))

            vocab_size = max(self.hparams.get("vocab_size", 0), tokenizer.vocab_size())

        tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
        scores: list[float] = [-10000.0] * vocab_size
        toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size

        if isinstance(tokenizer, SentencePieceProcessor):
            for token_id in range(tokenizer.vocab_size()):
                piece = tokenizer.IdToPiece(token_id)
                text = piece.encode("utf-8")
                score = tokenizer.GetScore(token_id)

                toktype = SentencePieceTokenTypes.NORMAL
                if tokenizer.IsUnknown(token_id):
                    toktype = SentencePieceTokenTypes.UNKNOWN
                elif tokenizer.IsControl(token_id):
                    toktype = SentencePieceTokenTypes.CONTROL
                elif tokenizer.IsUnused(token_id):
                    toktype = SentencePieceTokenTypes.UNUSED
                elif tokenizer.IsByte(token_id):
                    toktype = SentencePieceTokenTypes.BYTE

                tokens[token_id] = text
                scores[token_id] = score
                toktypes[token_id] = toktype
        else:
            added_vocab = tokenizer.get_added_vocab()
            unk_token = tokenizer_config_json.get("unk_token")
            unk_token_id = added_vocab.get(unk_token, tokenizer_json["model"].get("unk_id", 3))

            for token_id in range(tokenizer.vocab_size):
                if (piece := tokenizer._convert_id_to_token(token_id)) is not None:
                    text = piece.encode("utf-8")
                    score = tokenizer_json["model"]["vocab"][token_id][1]

                    toktype = SentencePieceTokenTypes.NORMAL
                    if token_id == unk_token_id:
                        toktype = SentencePieceTokenTypes.UNKNOWN
                    elif token_id in tokenizer.all_special_ids:
                        toktype = SentencePieceTokenTypes.CONTROL
                    elif token_id in added_vocab.values():
                        toktype = SentencePieceTokenTypes.USER_DEFINED
                    # No reliable way to detect this, but jina doesn't have any
                    # elif tokenizer.IsByte(token_id):
                    #     toktype = SentencePieceTokenTypes.BYTE

                    tokens[token_id] = text
                    scores[token_id] = score
                    toktypes[token_id] = toktype

        if isinstance(tokenizer, SentencePieceProcessor):
            # realign tokens (see HF tokenizer code)
            tokens = [b'<s>', b'<pad>', b'</s>', b'<unk>'] + tokens[3:-1]
            scores = [0.0, 0.0, 0.0, 0.0] + scores[3:-1]
            toktypes = [
                SentencePieceTokenTypes.CONTROL,
                SentencePieceTokenTypes.CONTROL,
                SentencePieceTokenTypes.CONTROL,
                SentencePieceTokenTypes.UNKNOWN,
            ] + toktypes[3:-1]

        if self.model_arch == gguf.MODEL_ARCH.NOMIC_BERT_MOE:
            # Add mask token missing from sentencepiece.bpe.model
            tokens[250001] = b'<mask>'
            scores[250001] = 0.0
            toktypes[250001] = SentencePieceTokenTypes.CONTROL

        self.gguf_writer.add_tokenizer_model("t5")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)
        self.gguf_writer.add_add_space_prefix(add_prefix)
        self.gguf_writer.add_token_type_count(self.hparams.get("type_vocab_size", 1))
        self.gguf_writer.add_remove_extra_whitespaces(remove_whitespaces)
        if precompiled_charsmap:
            self.gguf_writer.add_precompiled_charsmap(precompiled_charsmap)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)


@ModelBase.register("DistilBertModel", "DistilBertForMaskedLM", "DistilBertForSequenceClassification")
class DistilBertModel(BertModel):
    model_arch = gguf.MODEL_ARCH.BERT

    def set_gguf_parameters(self):
        self.gguf_writer.add_layer_norm_eps(1e-12)
        logger.info("gguf: layer norm epsilon = 1e-12")
        super().set_gguf_parameters()

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        if name.startswith("distilbert."):
            name = name[11:]

        # these layers act as the MLM head, so we don't need them
        if name.startswith("vocab_"):
            return []

        return super().modify_tensors(data_torch, name, bid)


@ModelBase.register("RobertaModel", "RobertaForSequenceClassification")
class RobertaModel(BertModel):
    model_arch = gguf.MODEL_ARCH.BERT

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # we need the pad_token_id to know how to chop down position_embd matrix
        if (pad_token_id := self.hparams.get("pad_token_id")) is not None:
            self._position_offset = 1 + pad_token_id
            if "max_position_embeddings" in self.hparams:
                self.hparams["max_position_embeddings"] -= self._position_offset
        else:
            self._position_offset = None

    def set_vocab(self):
        """Support BPE tokenizers for roberta models"""
        bpe_tok_path = self.dir_model / "tokenizer.json"
        if bpe_tok_path.exists():
            self._set_vocab_gpt2()

            # we need this to validate the size of the token_type embeddings
            # though currently we are passing all zeros to the token_type embeddings
            # "Sequence A" or "Sequence B"
            self.gguf_writer.add_token_type_count(self.hparams.get("type_vocab_size", 1))
        else:
            return super().set_vocab()

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # if name starts with "roberta.", remove the prefix
        # e.g. https://huggingface.co/BAAI/bge-reranker-v2-m3/tree/main
        if name.startswith("roberta."):
            name = name[8:]

        # position embeddings start at pad_token_id + 1, so just chop down the weight tensor
        if name == "embeddings.position_embeddings.weight":
            if self._position_offset is not None:
                data_torch = data_torch[self._position_offset:, :]
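                # e.g. (illustrative) pad_token_id = 1 gives an offset of 2, so
                # the first two rows (reserved for padding bookkeeping) are cut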

        return super().modify_tensors(data_torch, name, bid)


@ModelBase.register("NomicBertModel")
class NomicBertModel(BertModel):
    model_arch = gguf.MODEL_ARCH.BERT

    def __init__(self, dir_model: Path, ftype: gguf.LlamaFileType, fname_out: Path, **kwargs: Any):
        hparams = kwargs.pop("hparams", None)
        if hparams is None:
            hparams = ModelBase.load_hparams(dir_model, False)

        self.is_moe = bool(hparams.get("moe_every_n_layers"))
        self.model_arch = gguf.MODEL_ARCH.NOMIC_BERT_MOE if self.is_moe else gguf.MODEL_ARCH.NOMIC_BERT

        super().__init__(dir_model, ftype, fname_out, hparams=hparams, **kwargs)

        self._tokenizer_is_xlmroberta = self._is_tokenizer_xlmroberta()
        if self._tokenizer_is_xlmroberta:
            self._xlmroberta_tokenizer_init()

        npos, mtp = self.hparams["n_positions"], self.hparams.get("max_trained_positions", 2048)
        if npos == 8192 and mtp == 2048:
            self.hparams["n_positions"] = 2048  # nomic-embed-text v1 and v1.5 are trained for 2048 tokens.
        elif npos == 2048 and mtp == 2048:
            self.hparams["n_positions"] = 512  # nomic-embed-text-v2-moe is trained for 512 tokens.
        else:
            raise ValueError(f"unrecognized parameters: n_positions={npos}, max_trained_positions={mtp}")

        assert self.hparams["activation_function"] == ("gelu" if self.is_moe else "swiglu")

        # this doesn't do anything in the HF version
        assert self.hparams["causal"] is False

        # no bias tensors unless MoE
        assert self.hparams["qkv_proj_bias"] == self.is_moe
        assert self.hparams["mlp_fc1_bias"] == self.is_moe
        assert self.hparams["mlp_fc2_bias"] == self.is_moe

        # norm at end of layer
        assert self.hparams["prenorm"] is False

        # standard RoPE
        assert self.hparams["rotary_emb_fraction"] == 1.0
        assert self.hparams["rotary_emb_interleaved"] is False
        assert self.hparams["rotary_emb_scale_base"] is None

    def set_vocab(self) -> None:
        if self._tokenizer_is_xlmroberta:
            return self._xlmroberta_set_vocab()
        return super().set_vocab()

    def modify_tensors(self, data_torch: torch.Tensor, name: str, bid: int | None) -> Iterable[tuple[str, torch.Tensor]]:
        # expert bias tensors are not used at inference time, skip them
        if "mlp.experts.bias" in name:
            return []

        if "mlp.experts.mlp.w1" in name:
            data_torch = data_torch.view(self.hparams["num_experts"], self.hparams["n_inner"], self.hparams["n_embd"])
            name += ".weight"

        if "mlp.experts.mlp.w2" in name:
            data_torch = data_torch.view(self.hparams["num_experts"], self.hparams["n_inner"], self.hparams["n_embd"])
            data_torch = data_torch.transpose(1, 2)
            name += ".weight"

        return [(self.map_tensor_name(name), data_torch)]

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_rope_freq_base(self.hparams["rotary_emb_base"])
        if self.is_moe:
            self.gguf_writer.add_moe_every_n_layers(self.hparams["moe_every_n_layers"])
            self.gguf_writer.add_expert_count(self.hparams["num_experts"])
            self.gguf_writer.add_expert_used_count(self.hparams["moe_top_k"])

    def _is_tokenizer_xlmroberta(self) -> bool:
        with open(self.dir_model / "tokenizer.json") as f:
            tokenizer_json = json.load(f)
        toktyp = tokenizer_json["model"]["type"]
        if toktyp == "Unigram":
            return True
        if toktyp == "WordPiece":
            return False
        raise ValueError(f"unknown tokenizer: {toktyp}")


@ModelBase.register("NeoBERT", "NeoBERTLMHead", "NeoBERTForSequenceClassification")
class NeoBert(BertModel):
    model_arch = gguf.MODEL_ARCH.NEO_BERT

    def set_gguf_parameters(self):
        super().set_gguf_parameters()

        # NeoBERT uses 2/3 of the intermediate size as feed forward length
        self.gguf_writer.add_feed_forward_length(int(2 * self.hparams["intermediate_size"] / 3))
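        # e.g. (illustrative) intermediate_size = 3072 -> int(2 * 3072 / 3) = 2048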
        self.gguf_writer.add_rope_freq_base(10000.0)  # default value for NeoBERT
        self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)

        f_rms_eps = self.hparams.get("norm_eps", 1e-6)  # default value for NeoBERT
        self.gguf_writer.add_layer_norm_rms_eps(f_rms_eps)
        logger.info(f"gguf: rms norm epsilon = {f_rms_eps}")

        self.gguf_writer.add_pooling_type(gguf.PoolingType.CLS)  # https://huggingface.co/chandar-lab/NeoBERT#how-to-use

    def modify_tensors(self, data_torch, name, bid):
        if name.startswith("decoder."):
            return []

        if name.startswith("model."):
            name = name[6:]

        return super().modify_tensors(data_torch, name, bid)


@ModelBase.register("XLMRobertaModel", "XLMRobertaForSequenceClassification")
class XLMRobertaModel(BertModel):
    model_arch = gguf.MODEL_ARCH.BERT
    _lora_files = {}
    _lora_names = []

    def __init__(self, dir_model: Path, ftype: gguf.LlamaFileType, fname_out: Path, **kwargs: Any):
        hparams = kwargs.pop("hparams", None)
        if hparams is None:
            hparams = ModelBase.load_hparams(dir_model, False)

        if lora_names := hparams.get("lora_adaptations"):
            self._lora_names = lora_names
            self.model_arch = gguf.MODEL_ARCH.JINA_BERT_V3

        super().__init__(dir_model, ftype, fname_out, hparams=hparams, **kwargs)
        self._xlmroberta_tokenizer_init()

    def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
        if self._lora_names:
            for name in self._lora_names:
                fname = self.add_prefix_to_filename(self.fname_out, f"lora-{name}-")
                self._lora_files[name] = gguf.GGUFWriter(fname, arch=gguf.MODEL_ARCH_NAMES[self.model_arch], endianess=self.endianess, use_temp_file=self.use_temp_file, dry_run=self.dry_run)

        return super().generate_extra_tensors()

    def set_type(self):
        for lora_writer in self._lora_files.values():
            lora_writer.add_type(gguf.GGUFType.ADAPTER)
            lora_writer.add_string(gguf.Keys.Adapter.TYPE, "lora")
        super().set_type()

    def set_vocab(self):
        self._xlmroberta_set_vocab()

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # if name starts with "roberta.", remove the prefix
        # e.g. https://huggingface.co/BAAI/bge-reranker-v2-m3/tree/main
        if name.startswith("roberta."):
            name = name[8:]

        # jina-embeddings-v3
        if ".parametrizations." in name:
            name = name.replace(".parametrizations.", ".")
            if name.endswith(".original"):
                name = name[:-9]

        # position embeddings start at pad_token_id + 1, so just chop down the weight tensor
        if name == "embeddings.position_embeddings.weight":
            if self._position_offset is not None:
                data_torch = data_torch[self._position_offset:, :]

        if name.endswith(".0.lora_A") or name.endswith(".0.lora_B"):
            if name.startswith("pooler.dense"):
                return []

            num_loras = data_torch.size(0)
            assert num_loras == len(self._lora_names)
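
            # note: jina-embeddings-v3 ships all task adapters stacked in one
            # tensor, with dim 0 indexing the task (one entry per name in
            # lora_adaptations); each slice below goes to its own adapter GGUF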
            # Split out each LoRA in their own GGUF
            for i, lora_writer in enumerate(self._lora_files.values()):
                new_name = self.map_tensor_name(name[:-9]) + name[-7:].lower()
                data = data_torch[i, :, :]
                # Transpose/flip token_embd/types into correct shape
                if new_name == "token_embd.weight.lora_b":
                    data = data.T
                elif new_name.startswith("token_types.weight."):
                    new_name = new_name[:-1] + ("a" if new_name[-1:] == "b" else "b")
                lora_writer.add_tensor(new_name, data.float().numpy(), raw_dtype=gguf.GGMLQuantizationType.F32)

            return []

        return super().modify_tensors(data_torch, name, bid)

    def set_gguf_parameters(self):
        super().set_gguf_parameters()

        # jina-embeddings-v3
        if rotary_emb_base := self.hparams.get("rotary_emb_base"):
            self.gguf_writer.add_rope_freq_base(rotary_emb_base)
        lora_alpha = self.hparams.get("lora_alpha")
        if lora_prompt_prefixes := self.hparams.get("task_instructions"):
            assert self._lora_files and all(lora_name in lora_prompt_prefixes for lora_name in self._lora_files.keys())
        for lora_name, lora_writer in self._lora_files.items():
            lora_writer.add_float32(gguf.Keys.Adapter.LORA_ALPHA, lora_alpha if lora_alpha is not None else 1.0)
            lora_writer.add_string(gguf.Keys.Adapter.LORA_TASK_NAME, lora_name)
            if lora_prompt_prefixes:
                lora_writer.add_string(gguf.Keys.Adapter.LORA_PROMPT_PREFIX, lora_prompt_prefixes[lora_name])

    def write(self):
        super().write()
        for lora_writer in self._lora_files.values():
            lora_writer.write_header_to_file()
            lora_writer.write_kv_data_to_file()
            lora_writer.write_tensors_to_file(progress=True)
            lora_writer.close()


@ModelBase.register("GemmaForCausalLM")
class GemmaModel(TextModel):
    model_arch = gguf.MODEL_ARCH.GEMMA

    def set_vocab(self):
        self._set_vocab_sentencepiece()

        # TODO: these special tokens should be exported only for the CodeGemma family
        special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=False,
                                          special_token_types=['prefix', 'suffix', 'middle', 'fsep', 'eot'])
        special_vocab._set_special_token("prefix", 67)
        special_vocab._set_special_token("suffix", 69)
        special_vocab._set_special_token("middle", 68)
        special_vocab._set_special_token("fsep",   70)
        special_vocab._set_special_token("eot",    107)
        special_vocab.chat_template = None  # do not add it twice
        special_vocab.add_to_gguf(self.gguf_writer)

        self.gguf_writer.add_add_space_prefix(False)

    def set_gguf_parameters(self):
        hparams = self.hparams
        block_count = hparams["num_hidden_layers"]

        self.gguf_writer.add_context_length(hparams["max_position_embeddings"])
        self.gguf_writer.add_embedding_length(hparams["hidden_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
        self.gguf_writer.add_head_count(hparams["num_attention_heads"])
        self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"] if "num_key_value_heads" in hparams else hparams["num_attention_heads"])
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
        self.gguf_writer.add_key_length(hparams["head_dim"])
        self.gguf_writer.add_value_length(hparams["head_dim"])
        self.gguf_writer.add_file_type(self.ftype)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        # lm_head is not used in llama.cpp, but autoawq includes this tensor in the model.
        # To prevent errors, skip loading lm_head.weight.
        if name == "lm_head.weight":
            logger.debug(f"Skipping get tensor {name!r} in safetensors so that convert can end normally.")
            return []

        # ref: https://github.com/huggingface/transformers/blob/fc37f38915372c15992b540dfcbbe00a916d4fc6/src/transformers/models/gemma/modeling_gemma.py#L89
        if name.endswith("norm.weight"):
            data_torch = data_torch + 1
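            # note: GemmaRMSNorm computes x * (1 + weight), while the GGUF
            # runtime applies plain x * weight, so the +1 is folded into the
            # exported tensor here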

        return [(self.map_tensor_name(name), data_torch)]


@ModelBase.register("Gemma2ForCausalLM")
class Gemma2Model(TextModel):
    model_arch = gguf.MODEL_ARCH.GEMMA2

    def set_vocab(self):
        self._set_vocab_sentencepiece()
        self.gguf_writer.add_add_space_prefix(False)

    def set_gguf_parameters(self):
        hparams = self.hparams
        block_count = hparams["num_hidden_layers"]

        self.gguf_writer.add_context_length(hparams["max_position_embeddings"])
        self.gguf_writer.add_embedding_length(hparams["hidden_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
        self.gguf_writer.add_head_count(hparams["num_attention_heads"])
        self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"] if "num_key_value_heads" in hparams else hparams["num_attention_heads"])
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
        self.gguf_writer.add_key_length(hparams["head_dim"])
        self.gguf_writer.add_value_length(hparams["head_dim"])
        self.gguf_writer.add_file_type(self.ftype)
        self.gguf_writer.add_attn_logit_softcapping(self.hparams["attn_logit_softcapping"])
        self.gguf_writer.add_final_logit_softcapping(self.hparams["final_logit_softcapping"])
        self.gguf_writer.add_sliding_window(self.hparams["sliding_window"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        # lm_head is not used in llama.cpp, but autoawq includes this tensor in the model.
        # To prevent errors, skip loading lm_head.weight.
        if name == "lm_head.weight":
            logger.debug(f"Skipping get tensor {name!r} in safetensors so that convert can end normally.")
            return []

        # ref: https://github.com/huggingface/transformers/blob/fc37f38915372c15992b540dfcbbe00a916d4fc6/src/transformers/models/gemma/modeling_gemma.py#L89
        if name.endswith("norm.weight"):
            data_torch = data_torch + 1

        return [(self.map_tensor_name(name), data_torch)]


@ModelBase.register("Gemma3ForCausalLM", "Gemma3ForConditionalGeneration")
class Gemma3Model(TextModel):
    model_arch = gguf.MODEL_ARCH.GEMMA3
    norm_shift = 1.0  # Gemma3RMSNorm adds 1.0 to the norm value

    def set_vocab(self):
        self._set_vocab_sentencepiece()
        self.gguf_writer.add_add_space_prefix(False)

    def set_gguf_parameters(self):
        hparams = self.hparams
        block_count = hparams["num_hidden_layers"]

        # some default values are not specified in the hparams
        self.gguf_writer.add_context_length(hparams.get("max_position_embeddings", 131072))
        self.gguf_writer.add_embedding_length(hparams["hidden_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
        self.gguf_writer.add_head_count(hparams.get("num_attention_heads", 8))
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams.get("rms_norm_eps", 1e-6))
        self.gguf_writer.add_key_length(hparams.get("head_dim", 256))
        self.gguf_writer.add_value_length(hparams.get("head_dim", 256))
        self.gguf_writer.add_file_type(self.ftype)
        self.gguf_writer.add_rope_freq_base(hparams.get("rope_theta", 1_000_000.0))  # for global layers
        # attn_logit_softcapping is removed in Gemma3
        assert hparams.get("attn_logit_softcapping") is None
        self.gguf_writer.add_sliding_window(hparams["sliding_window"])
        self.gguf_writer.add_head_count_kv(hparams.get("num_key_value_heads", 4))
        if hparams.get("rope_scaling") is not None:
            assert hparams["rope_scaling"]["rope_type"] == "linear"
            # important: this rope_scaling is only applied for the global layers, and is not used by the 1B model
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
            self.gguf_writer.add_rope_scaling_factor(hparams["rope_scaling"]["factor"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        if "language_model." in name:
            name = name.replace("language_model.", "")
        elif name.startswith("multi_modal_projector.") or name.startswith("vision_tower.") \
                or name.startswith("multimodal_projector.") or name.startswith("vision_model."):
            return []  # skip vision tensors

        # remove OOV (out-of-vocabulary) rows in token_embd
        if "embed_tokens.weight" in name:
            vocab = self._create_vocab_sentencepiece()
            tokens = vocab[0]
            data_torch = data_torch[:len(tokens)]

        # ref code in Gemma3RMSNorm:
        #   output = output * (1.0 + self.weight.float())
        # note: this is not the case on gemma3n
        if name.endswith("norm.weight"):
            data_torch = data_torch + self.norm_shift

        return [(self.map_tensor_name(name), data_torch)]


@ModelBase.register("Gemma3TextModel")
class EmbeddingGemma(Gemma3Model):
    model_arch = gguf.MODEL_ARCH.GEMMA_EMBEDDING

    def set_gguf_parameters(self):
        super().set_gguf_parameters()

        # Override the sliding window size as it gets adjusted by the Gemma3TextConfig
        # constructor. We want to use the value from the original model's config.json.
        # ref: https://github.com/huggingface/transformers/pull/40700
        with open(self.dir_model / "config.json", "r", encoding="utf-8") as f:
            config = json.load(f)
        orig_sliding_window = config.get("sliding_window")
        if orig_sliding_window is None:
            raise ValueError("sliding_window not found in model config - this is required for the model")

        logger.info(f"Using original sliding_window from config: {orig_sliding_window} "
                    f"instead of {self.hparams['sliding_window']}")
        self.gguf_writer.add_sliding_window(orig_sliding_window)

        self._try_set_pooling_type()


@ModelBase.register("Gemma3ForConditionalGeneration")
class Gemma3VisionModel(MmprojModel):
    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        hparams = self.hparams
        self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.GEMMA3)
        # default values below are taken from HF transformers code
        self.gguf_writer.add_vision_attention_layernorm_eps(hparams.get("layer_norm_eps", 1e-6))
        self.gguf_writer.add_vision_use_gelu(True)
        # calculate proj_scale_factor (used by tinygemma3 test model)
        image_seq_length = self.preprocessor_config.get("image_seq_length", 256)
        n_per_side = int(image_seq_length ** 0.5)
        image_size = self.hparams["image_size"]
        patch_size = self.hparams["patch_size"]
        proj_scale_factor = (image_size // patch_size) // n_per_side
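        # e.g. (illustrative) image_size = 896, patch_size = 14: 896 // 14 = 64
        # patches per side, and 64 // 16 = 4, i.e. the default scale factor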
        if proj_scale_factor > 0 and proj_scale_factor != 4:
            # we only need to write this if it's not the default value
            # in this case, we are converting a test model
            self.gguf_writer.add_vision_projector_scale_factor(proj_scale_factor)

    def tensor_force_quant(self, name, new_name, bid, n_dims):
        # related to https://github.com/ggml-org/llama.cpp/issues/13025
        if "input_projection" in name:
            return gguf.GGMLQuantizationType.F16
        if ".embeddings." in name:
            return gguf.GGMLQuantizationType.F32
        return super().tensor_force_quant(name, new_name, bid, n_dims)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        if "vision_model.head." in name:
            return []  # skip redundant tensors for tinygemma3

        if name.startswith("multi_modal_projector.") or name.startswith("vision_tower.") \
                or name.startswith("multimodal_projector.") or name.startswith("vision_model."):
            # process vision tensors
            name = name.replace("_weight", ".weight")

            # correct norm value; only "soft_emb_norm" needs to be corrected, as it is part of the Gemma projector
            # the other norm values are part of the SigLIP model, and they are already correct
            # ref code: Gemma3RMSNorm
            if "soft_emb_norm.weight" in name:
                logger.info(f"Correcting norm value for '{name}'")
                data_torch = data_torch + 1

            return [(self.map_tensor_name(name), data_torch)]

        return []  # skip other tensors


@ModelBase.register("Gemma3nForConditionalGeneration")
class Gemma3NModel(Gemma3Model):
    model_arch = gguf.MODEL_ARCH.GEMMA3N
    norm_shift = 0.0  # matches the Gemma3p5RMSNorm scale_shift in the reference python code

    _altup_proj: list[Tensor] = []
    _altup_unembd: list[Tensor] = []

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        assert self.hparams["altup_num_inputs"] == 4, "Current conversion only supports 4 altup inputs"
        self._altup_proj = [
            torch.Tensor(),  # to be replaced
            torch.Tensor(),  # to be replaced
            torch.Tensor(),  # to be replaced
        ]
        self._altup_unembd = [
            torch.Tensor(),  # to be replaced
            torch.Tensor(),  # to be replaced
            torch.Tensor(),  # to be replaced
        ]

    def set_vocab(self):
        super().set_vocab()

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_altup_active_idx(self.hparams["altup_active_idx"])
        self.gguf_writer.add_altup_num_inputs(self.hparams["altup_num_inputs"])
        self.gguf_writer.add_embedding_length_per_layer_input(self.hparams["hidden_size_per_layer_input"])
        self.gguf_writer.add_shared_kv_layers(self.hparams["num_kv_shared_layers"])
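
        # note: each entry of activation_sparsity_pattern is a target fraction s
        # of activations to drop; the normal quantile function turns it into a
        # cutoff in standard deviations, e.g. (illustrative) icdf(0.95) ~= 1.645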
        activation_sparsity_scale = []
        for s in self.hparams["activation_sparsity_pattern"]:
            normal_dist = torch.distributions.normal.Normal(0, 1)
            std_multiplier = normal_dist.icdf(torch.tensor(s, dtype=torch.float32))
            activation_sparsity_scale.append(std_multiplier.item())
        self.gguf_writer.add_activation_sparsity_scale(activation_sparsity_scale)

        sliding_window_pattern = []
        for t in self.hparams["layer_types"]:
            sliding_window_pattern.append(t == "sliding_attention")
        self.gguf_writer.add_sliding_window_pattern(sliding_window_pattern)

    def _stack_matrices(self, matrices: list[Tensor]) -> Tensor | None:
        has_all = all(m.numel() > 0 for m in matrices)
        if not has_all:
            return None
        else:
            return torch.stack(matrices, dim=0)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        if name.endswith("_scale"):
            name = name + ".weight"

        # TODO: implement self.prediction_coefs.weight.clamp_(...)

        if "language_model." not in name:
            return []  # skip non-language model tensors

        if "altup_unembed_projections" in name:
            data_torch = data_torch.to(device="cpu")
            if ".0." in name:
                self._altup_unembd[0] = data_torch
            elif ".1." in name:
                self._altup_unembd[1] = data_torch
            elif ".2." in name:
                self._altup_unembd[2] = data_torch
            else:
                raise ValueError(f"Unknown name: {name}")
            out = self._stack_matrices(self._altup_unembd)
            if out is not None:
                return [(self.map_tensor_name("model.altup_unembed_projections.weight"), out)]
            else:
                return []

        if "altup_projections" in name:
            data_torch = data_torch.to(device="cpu")
            if ".0." in name:
                self._altup_proj[0] = data_torch
            elif ".1." in name:
                self._altup_proj[1] = data_torch
            elif ".2." in name:
                self._altup_proj[2] = data_torch
            else:
                raise ValueError(f"Unknown name: {name}")
            out = self._stack_matrices(self._altup_proj)
            if out is not None:
                return [(self.map_tensor_name("model.altup_projections.weight"), out)]
            else:
                return []

        return super().modify_tensors(data_torch, name, bid)


@ModelBase.register("Starcoder2ForCausalLM")
class StarCoder2Model(TextModel):
    model_arch = gguf.MODEL_ARCH.STARCODER2


@ModelBase.register("Rwkv6ForCausalLM")
class Rwkv6Model(TextModel):
    model_arch = gguf.MODEL_ARCH.RWKV6

    def set_vocab(self):
        self._set_vocab_rwkv_world()

    def set_gguf_parameters(self):
        block_count = self.hparams["num_hidden_layers"]
        head_size = self.hparams["head_size"]
        hidden_size = self.hparams["hidden_size"]
        layer_norm_eps = self.hparams["layer_norm_epsilon"]
        rescale_every_n_layers = self.hparams["rescale_every"]
        intermediate_size = self.hparams["intermediate_size"] if self.hparams["intermediate_size"] is not None else int((hidden_size * 3.5) // 32 * 32)
        time_mix_extra_dim = 64 if hidden_size == 4096 else 32
        time_decay_extra_dim = 128 if hidden_size == 4096 else 64

        # RWKV isn't context limited
        self.gguf_writer.add_context_length(1048576)
        self.gguf_writer.add_embedding_length(hidden_size)
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_layer_norm_eps(layer_norm_eps)
        self.gguf_writer.add_rescale_every_n_layers(rescale_every_n_layers)
        self.gguf_writer.add_wkv_head_size(head_size)
        self.gguf_writer.add_time_mix_extra_dim(time_mix_extra_dim)
        self.gguf_writer.add_time_decay_extra_dim(time_decay_extra_dim)
        self.gguf_writer.add_feed_forward_length(intermediate_size)
        self.gguf_writer.add_file_type(self.ftype)

        # required by llama.cpp, unused
        self.gguf_writer.add_head_count(0)

    lerp_weights: dict[int, dict[str, Tensor]] = {}

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        new_name = self.map_tensor_name(name)
        if not (new_name.endswith(".weight") or new_name.endswith(".bias")):
            new_name += ".weight"

        if new_name.endswith("time_mix_w1.weight") or new_name.endswith("time_mix_decay_w1.weight") or new_name.endswith("time_mix_decay_w2.weight"):
            data_torch = data_torch.transpose(0, 1)

        if new_name.endswith("time_mix_w2.weight"):
            data_torch = data_torch.permute(0, 2, 1)

        if new_name.endswith("time_mix_decay.weight") or "lerp" in new_name:
            data_torch = data_torch.squeeze()

        try:
            rescale_every_n_layers = self.hparams["rescale_every"]
            if rescale_every_n_layers > 0:
                if new_name.endswith("time_mix_output.weight") or new_name.endswith("channel_mix_value.weight"):
                    data_torch = data_torch.div_(2 ** int(bid // rescale_every_n_layers))
        except KeyError:
            pass

        # concat time_mix_lerp weights to reduce some cpu overhead
        # also reduces the number of tensors in the model
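        # sketch of the fused layout: the five per-channel lerp vectors are each
        # unsqueezed to (1, n_embd), stacked to (5, 1, n_embd), and given one
        # more inner axis, yielding a single (5, 1, 1, n_embd) tensor that is
        # emitted once the full {w, k, v, r, g} set has been collected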
        if bid is not None and "time_mix_lerp" in new_name and "time_mix_lerp_x" not in new_name:
            try:
                self.lerp_weights[bid][new_name] = data_torch
            except KeyError:
                self.lerp_weights[bid] = {new_name: data_torch}
            if all(f"blk.{bid}.time_mix_lerp_{i}.weight" in self.lerp_weights[bid].keys() for i in ["w", "k", "v", "r", "g"]):
                new_name = f"blk.{bid}.time_mix_lerp_fused.weight"
                data = torch.stack([self.lerp_weights[bid][f"blk.{bid}.time_mix_lerp_{i}.weight"].unsqueeze(0) for i in ["w", "k", "v", "r", "g"]], dim=0).unsqueeze(1)
                yield (new_name, data)
            return

        yield (new_name, data_torch)


@ModelBase.register("RWKV6Qwen2ForCausalLM")
class RWKV6Qwen2Model(Rwkv6Model):
    model_arch = gguf.MODEL_ARCH.RWKV6QWEN2

    def set_vocab(self):
        try:
            self._set_vocab_sentencepiece()
        except FileNotFoundError:
            self._set_vocab_gpt2()

    def set_gguf_parameters(self):
        block_count = self.hparams["num_hidden_layers"]
        num_attention_heads = self.hparams["num_attention_heads"]
        num_key_value_heads = self.hparams["num_key_value_heads"]
        hidden_size = self.hparams["hidden_size"]
        head_size = hidden_size // num_attention_heads
        rms_norm_eps = self.hparams["rms_norm_eps"]
        intermediate_size = self.hparams["intermediate_size"]
        time_mix_extra_dim = self.hparams.get("lora_rank_tokenshift", 64 if hidden_size >= 4096 else 32)
        time_decay_extra_dim = self.hparams.get("lora_rank_decay", 128 if hidden_size >= 4096 else 64)

        # RWKV isn't context limited
        self.gguf_writer.add_context_length(1048576)
        self.gguf_writer.add_embedding_length(hidden_size)
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_wkv_head_size(head_size)
        self.gguf_writer.add_time_mix_extra_dim(time_mix_extra_dim)
        self.gguf_writer.add_time_decay_extra_dim(time_decay_extra_dim)
        self.gguf_writer.add_feed_forward_length(intermediate_size)
        self.gguf_writer.add_file_type(self.ftype)

        # special parameters for time_mixing in RWKV6QWEN2
        self.gguf_writer.add_layer_norm_rms_eps(rms_norm_eps)
        self.gguf_writer.add_token_shift_count(1)
        # RWKV6QWEN2 uses grouped key/value like GQA
        self.gguf_writer.add_head_count_kv(num_key_value_heads)

        # required by llama.cpp, unused
        self.gguf_writer.add_head_count(0)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        for new_name, data in super().modify_tensors(data_torch, name, bid):
            if "time_mix_w1" in new_name or "time_mix_w2" in new_name:
                data = data.view(5, -1, data.shape[-1])
                # rwkv6qwen2 uses the order rkvwg instead of the original wkvrg;
                # permute them here to avoid code changes elsewhere
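                # i.e. (illustrative) over the source order (r, k, v, w, g) the
                # index map [3, 1, 2, 0, 4] selects (w, k, v, r, g)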
                data = torch.stack([data[3], data[1], data[2], data[0], data[4]], dim=0).view(-1, data.shape[-1])
                if "w2" in new_name:
                    data = data.view(5, -1, data.shape[-1])
                yield (new_name, data)
                continue
            yield (new_name, data)


@ModelBase.register("Rwkv7ForCausalLM", "RWKV7ForCausalLM")
class Rwkv7Model(TextModel):
    model_arch = gguf.MODEL_ARCH.RWKV7

    def set_vocab(self):
        self._set_vocab_rwkv_world()

    def calc_lora_rank(self, hidden_size, exponent, multiplier):
        return max(1, round(hidden_size ** exponent * multiplier / 32)) * 32
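        # e.g. (illustrative) hidden_size = 2048, exponent = 0.5, multiplier = 1.8:
        #      round(2048 ** 0.5 * 1.8 / 32) * 32 = round(2.545...) * 32 = 3 * 32 = 96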
  4483. def set_gguf_parameters(self):
  4484. block_count = self.hparams["num_hidden_layers"]
  4485. try:
  4486. head_size = self.hparams["head_size"]
  4487. layer_norm_eps = self.hparams["layer_norm_epsilon"]
  4488. except KeyError:
  4489. head_size = self.hparams["head_dim"]
  4490. layer_norm_eps = self.hparams["norm_eps"]
  4491. hidden_size = self.hparams["hidden_size"]
  4492. intermediate_size = self.hparams["intermediate_size"] if self.hparams["intermediate_size"] is not None else (hidden_size * 4)
  4493. # ICLR: In-Context-Learning-Rate
  4494. try:
  4495. lora_rank_decay = self.hparams["lora_rank_decay"] if self.hparams["lora_rank_decay"] is not None else self.calc_lora_rank(hidden_size, 0.5, 1.8)
  4496. lora_rank_iclr = self.hparams["lora_rank_iclr"] if self.hparams["lora_rank_iclr"] is not None else self.calc_lora_rank(hidden_size, 0.5, 1.8)
  4497. lora_rank_value_residual_mix = self.hparams["lora_rank_value_residual_mix"] if self.hparams["lora_rank_value_residual_mix"] is not None else self.calc_lora_rank(hidden_size, 0.5, 1.3)
  4498. lora_rank_gate = self.hparams["lora_rank_gate"] if self.hparams["lora_rank_gate"] is not None else self.calc_lora_rank(hidden_size, 0.8, 0.6)
  4499. except KeyError:
  4500. lora_rank_decay = self.hparams["decay_low_rank_dim"] if self.hparams["decay_low_rank_dim"] is not None else self.calc_lora_rank(hidden_size, 0.5, 1.8)
  4501. lora_rank_iclr = self.hparams["a_low_rank_dim"] if self.hparams["a_low_rank_dim"] is not None else self.calc_lora_rank(hidden_size, 0.5, 1.8)
  4502. lora_rank_value_residual_mix = self.hparams["v_low_rank_dim"] if self.hparams["v_low_rank_dim"] is not None else self.calc_lora_rank(hidden_size, 0.5, 1.3)
  4503. lora_rank_gate = self.hparams["gate_low_rank_dim"] if self.hparams["gate_low_rank_dim"] is not None else self.calc_lora_rank(hidden_size, 0.8, 0.6)
        # RWKV isn't context limited
        self.gguf_writer.add_context_length(1048576)
        self.gguf_writer.add_embedding_length(hidden_size)
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_layer_norm_eps(layer_norm_eps)
        self.gguf_writer.add_wkv_head_size(head_size)
        self.gguf_writer.add_decay_lora_rank(lora_rank_decay)
        self.gguf_writer.add_iclr_lora_rank(lora_rank_iclr)
        self.gguf_writer.add_value_residual_mix_lora_rank(lora_rank_value_residual_mix)
        self.gguf_writer.add_gate_lora_rank(lora_rank_gate)
        self.gguf_writer.add_feed_forward_length(intermediate_size)
        self.gguf_writer.add_file_type(self.ftype)

        # required by llama.cpp, unused
        self.gguf_writer.add_head_count(0)

    lerp_weights: dict[int, dict[str, Tensor]] = {}
    lora_needs_transpose: bool = True

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # unify tensor names here to make life easier
        name = name.replace("blocks", "layers").replace("ffn", "feed_forward")
        name = name.replace("self_attn", "attention").replace("attn", "attention")
        name = name.replace("time_mixer.", "")
        # lora layer names in fla-hub's impl
        if "_lora.lora" in name:
            self.lora_needs_transpose = False
            name = name.replace("_lora.lora.0.weight", "1.weight")
            name = name.replace("_lora.lora.2.weight", "2.weight")
            name = name.replace("_lora.lora.2.bias", "0.weight")

        name = name.replace("feed_forward_norm", "ln2")
        name = name.replace("g_norm", "ln_x")

        if "attention.v" in name and "value" not in self.map_tensor_name(name) and bid == 0:
            # some models have dummy v0/v1/v2 on first layer while others don't
            # ignore them all since they are not used
            return

        wkv_has_gate = self.hparams.get("wkv_has_gate", True)
        lerp_list = ["r", "w", "k", "v", "a", "g"] if wkv_has_gate else ["r", "w", "k", "v", "a"]
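        # Added note (not from the original source): the per-channel token-shift
        # lerp vectors x_r/x_w/... are gathered below into a single tensor of
        # shape (len(lerp_list), 1, 1, n_embd), presumably so the runtime can
        # apply all of them in one fused operation.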
        if bid is not None and "attention.x_" in name:
            if "attention.x_x" in name:
                # already concatenated
                new_name = f"blk.{bid}.time_mix_lerp_fused.weight"
                data = data_torch.reshape(len(lerp_list), 1, 1, -1)
                yield (new_name, data)
            else:
                try:
                    self.lerp_weights[bid][name] = data_torch
                except KeyError:
                    self.lerp_weights[bid] = {name: data_torch}
                if all(f"model.layers.{bid}.attention.x_{i}" in self.lerp_weights[bid].keys() for i in lerp_list):
                    new_name = f"blk.{bid}.time_mix_lerp_fused.weight"
                    data = torch.stack([self.lerp_weights[bid][f"model.layers.{bid}.attention.x_{i}"] for i in lerp_list], dim=0)
                    yield (new_name, data)
            return
        else:
            data_torch = data_torch.squeeze()
            new_name = self.map_tensor_name(name)

            if not (new_name.endswith(".weight") or new_name.endswith(".bias")):
                new_name += ".weight"

            if self.lora_needs_transpose and any(
                new_name.endswith(t) for t in [
                    "time_mix_w1.weight", "time_mix_w2.weight",
                    "time_mix_a1.weight", "time_mix_a2.weight",
                    "time_mix_v1.weight", "time_mix_v2.weight",
                    "time_mix_g1.weight", "time_mix_g2.weight",
                ]
            ):
                data_torch = data_torch.transpose(0, 1)

            if 'r_k' in new_name:
                data_torch = data_torch.flatten()

            if bid == 0 and "time_mix_a" in new_name:
                # dummy v0/v1/v2 on first layer
                # easiest way to make llama.cpp happy
                yield (new_name.replace("time_mix_a", "time_mix_v"), data_torch)

            yield (new_name, data_torch)


@ModelBase.register("RwkvHybridForCausalLM")
class ARwkv7Model(Rwkv7Model):
    model_arch = gguf.MODEL_ARCH.ARWKV7

    def set_vocab(self):
        try:
            self._set_vocab_sentencepiece()
        except FileNotFoundError:
            self._set_vocab_gpt2()

    def set_gguf_parameters(self):
        block_count = self.hparams["num_hidden_layers"]
        hidden_size = self.hparams["hidden_size"]
        head_size = self.hparams["head_size"]
        rms_norm_eps = self.hparams["rms_norm_eps"]
        intermediate_size = self.hparams["intermediate_size"]
        wkv_has_gate = self.hparams["wkv_has_gate"]
        assert self.hparams["wkv_version"] == 7

        # ICLR: In-Context-Learning-Rate
        lora_rank_decay = 64
        lora_rank_iclr = 64
        lora_rank_value_residual_mix = 32
        lora_rank_gate = 128 if wkv_has_gate else 0

        # RWKV isn't context limited
        self.gguf_writer.add_context_length(1048576)
        self.gguf_writer.add_embedding_length(hidden_size)
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_layer_norm_rms_eps(rms_norm_eps)
        self.gguf_writer.add_wkv_head_size(head_size)
        self.gguf_writer.add_decay_lora_rank(lora_rank_decay)
        self.gguf_writer.add_iclr_lora_rank(lora_rank_iclr)
        self.gguf_writer.add_value_residual_mix_lora_rank(lora_rank_value_residual_mix)
        self.gguf_writer.add_gate_lora_rank(lora_rank_gate)
        self.gguf_writer.add_feed_forward_length(intermediate_size)
        self.gguf_writer.add_file_type(self.ftype)
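        # Added note (assumption, based on the hybrid design): ARWKV pairs
        # RWKV-7 time mixing with a conventional MLP, so only a single
        # token-shift state is kept per layer.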
        self.gguf_writer.add_token_shift_count(1)

        # required by llama.cpp, unused
        self.gguf_writer.add_head_count(0)


@ModelBase.register("MambaForCausalLM", "MambaLMHeadModel", "FalconMambaForCausalLM")
class MambaModel(TextModel):
    model_arch = gguf.MODEL_ARCH.MAMBA

    def __init__(self, dir_model: Path, *args, **kwargs):
        # Avoid using AutoConfig for hparams
        hparams = kwargs.pop("hparams", None)
        if hparams is None:
            with open(dir_model / "config.json", "r", encoding="utf-8") as f:
                hparams = json.load(f)
        super().__init__(dir_model, *args, hparams=hparams, **kwargs)

    def set_vocab(self):
        vocab_size = self.hparams["vocab_size"]
        # Round vocab size to next multiple of 8
        pad_vocab = self.hparams.get("pad_vocab_size_multiple", 8)
        # pad using ceiling division
        # ref: https://stackoverflow.com/a/17511341/22827863
        vocab_size = -(vocab_size // -pad_vocab) * pad_vocab
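        # Added worked example: vocab_size=50277, pad_vocab=8 gives
        # -(50277 // -8) * 8 == 6285 * 8 == 50280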
        self.hparams["vocab_size"] = vocab_size

        if (self.dir_model / "tokenizer.json").is_file():
            self._set_vocab_gpt2()
        elif (self.dir_model / "tokenizer.model").is_file():
            self._set_vocab_sentencepiece()
        else:
            # Use the GPT-NeoX tokenizer when no tokenizer files are present
            self._set_vocab_builtin("gpt-neox", vocab_size)

    def set_gguf_parameters(self):
        d_model = self.find_hparam(["hidden_size", "d_model"])
        d_conv = self.find_hparam(["conv_kernel", "d_conv"], optional=True) or 4
        d_inner = self.find_hparam(["intermediate_size", "d_inner"], optional=True) or 2 * d_model
        d_state = self.find_hparam(["state_size", "d_state"], optional=True) or 16
        # ceiling division
        # ref: https://stackoverflow.com/a/17511341/22827863
        # ref: https://github.com/state-spaces/mamba/blob/ce59daea3a090d011d6476c6e5b97f6d58ddad8b/mamba_ssm/modules/mamba_simple.py#L58
        dt_rank = self.find_hparam(["time_step_rank", "dt_rank"], optional=True) or -(d_model // -16)
        rms_norm_eps = self.find_hparam(["layer_norm_epsilon", "rms_norm_eps"], optional=True) or 1e-5
        use_dt_b_c_norm = False
        # For falconmamba we do apply RMS norm on B / DT and C layers
        if self.find_hparam(["model_type"], optional=True) in ("falcon_mamba",):
            use_dt_b_c_norm = True
        # Fail early for models which don't have a block expansion factor of 2
        assert d_inner == 2 * d_model

        self.gguf_writer.add_context_length(2**20)  # arbitrary value; for those who use the default
        self.gguf_writer.add_embedding_length(d_model)
        self.gguf_writer.add_feed_forward_length(0)  # unused, but seemingly required when loading
        self.gguf_writer.add_head_count(0)  # unused, but seemingly required when loading
        self.gguf_writer.add_block_count(self.block_count)
        self.gguf_writer.add_ssm_conv_kernel(d_conv)
        self.gguf_writer.add_ssm_inner_size(d_inner)
        self.gguf_writer.add_ssm_state_size(d_state)
        self.gguf_writer.add_ssm_time_step_rank(dt_rank)
        self.gguf_writer.add_layer_norm_rms_eps(rms_norm_eps)
        self.gguf_writer.add_ssm_dt_b_c_rms(use_dt_b_c_norm)  # For classic Mamba we don't apply rms norm on B / DT layers
        self.gguf_writer.add_file_type(self.ftype)

    _tok_embd = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        output_name = self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT)
        tok_embd_name = self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD)

        new_name = self.map_tensor_name(name)

        if name.endswith(".A_log"):
            logger.debug("A_log --> A ==> " + new_name)
            data_torch = -torch.exp(data_torch)
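            # Added note (assumption): the HF checkpoint stores A_log = log(-A),
            # so -exp(A_log) recovers the signed decay matrix A that the
            # runtime consumes directly.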

        # [4 1 8192 1] -> [4 8192 1 1]
        if self.match_model_tensor_name(new_name, gguf.MODEL_TENSOR.SSM_CONV1D, bid):
            data_torch = data_torch.squeeze()

        # assuming token_embd.weight is seen before output.weight
        if self._tok_embd is not None and new_name == output_name:
            if torch.equal(self._tok_embd, data_torch):
                logger.debug(f"{output_name} is equivalent to {tok_embd_name}, omitting")
                return []
        elif new_name == tok_embd_name:
            self._tok_embd = data_torch

        return [(new_name, data_torch)]


@ModelBase.register("Mamba2ForCausalLM")
class Mamba2Model(TextModel):
    model_arch = gguf.MODEL_ARCH.MAMBA2

    def __init__(self, dir_model: Path, *args, **kwargs):
        # Avoid using AutoConfig for hparams
        # It wrongly assumes all Mamba2 models are Mamba-Codestral-7B-v0.1
        hparams = kwargs.pop("hparams", None)
        if hparams is None:
            with open(dir_model / "config.json", "r", encoding="utf-8") as f:
                hparams = json.load(f)
        super().__init__(dir_model, *args, hparams=hparams, **kwargs)
        self.d_model = self.find_hparam(["hidden_size", "d_model", "dim"])
        self.d_inner = self.find_hparam(["mamba_d_ssm", "intermediate_size", "d_inner"], optional=True) or 2 * self.d_model
        self.n_group = self.find_hparam(["n_groups"], optional=True) or 1

    def set_vocab(self):
        vocab_size = self.hparams["vocab_size"]
        # Round vocab size to next multiple of 16
        pad_vocab = self.hparams.get("pad_vocab_size_multiple", 16)
        # pad using ceiling division
        # ref: https://stackoverflow.com/a/17511341/22827863
        vocab_size = -(vocab_size // -pad_vocab) * pad_vocab
        self.hparams["vocab_size"] = vocab_size

        if (self.dir_model / "tokenizer.model").is_file():
            self._set_vocab_sentencepiece()
        elif (self.dir_model / "tokenizer.model.v3").is_file():
            # mamba-codestral
            raise NotImplementedError(f"Please rename {self.dir_model / 'tokenizer.model.v3'} to {self.dir_model / 'tokenizer.model'}")
        elif (self.dir_model / "tokenizer.json").is_file():
            self._set_vocab_gpt2()
        else:
            # Use the GPT-NeoX tokenizer when no tokenizer files are present
            self._set_vocab_builtin("gpt-neox", vocab_size)

    def set_gguf_parameters(self):
        d_conv = self.find_hparam(["conv_kernel", "d_conv"], optional=True) or 4
        d_state = self.find_hparam(["state_size", "d_state"], optional=True) or 128
        head_dim = self.find_hparam(["mamba_d_head", "head_dim"], optional=True) or 64
        rms_norm_eps = self.find_hparam(["layer_norm_epsilon", "rms_norm_eps"], optional=True) or 1e-5

        # Fail early for models which don't have a block expansion factor of 2
        # TODO: does this really matter?
        # skip the assertion for FalconH1 Model
        if self.model_arch != gguf.MODEL_ARCH.FALCON_H1:
            assert self.d_inner == 2 * self.d_model
        assert self.d_inner % head_dim == 0

        self.gguf_writer.add_context_length(2**20)  # arbitrary value; for those who use the default
        self.gguf_writer.add_embedding_length(self.d_model)
        self.gguf_writer.add_feed_forward_length(0)  # unused, but seemingly required when loading
        self.gguf_writer.add_head_count(0)  # unused, but seemingly required when loading
        self.gguf_writer.add_block_count(self.block_count)
        self.gguf_writer.add_ssm_conv_kernel(d_conv)
        self.gguf_writer.add_ssm_inner_size(self.d_inner)
        self.gguf_writer.add_ssm_state_size(d_state)
        self.gguf_writer.add_ssm_time_step_rank(self.d_inner // head_dim)
        self.gguf_writer.add_ssm_group_count(self.n_group)
        self.gguf_writer.add_layer_norm_rms_eps(rms_norm_eps)
        self.gguf_writer.add_file_type(self.ftype)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        if name.startswith("model.backbone") or name.startswith("model.lm_head"):
            # map Mamba-Codestral-7B-v0.1 tensor names to the names used by Mamba-2
            name = name.removeprefix("model.")

        if name.endswith(".dt_bias"):
            name = name.rpartition(".dt_bias")[0] + ".dt_proj.bias"

        new_name = self.map_tensor_name(name)

        if self.match_model_tensor_name(new_name, gguf.MODEL_TENSOR.SSM_CONV1D, bid):
            data_torch = data_torch.squeeze()
        elif any(self.match_model_tensor_name(new_name, t, bid, suffix="") for t in [
            gguf.MODEL_TENSOR.SSM_A,
            gguf.MODEL_TENSOR.SSM_D,
        ]):
            # unsqueeze A to use similar shape semantics as Mamba-1
            # (D is also unsqueezed, but for more straightforward broadcast internally)
            data_torch = data_torch.reshape((*data_torch.shape, 1))
        elif self.match_model_tensor_name(new_name, gguf.MODEL_TENSOR.SSM_NORM, bid):
            data_torch = data_torch.reshape((self.n_group, self.d_inner // self.n_group))
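            # Added note: the flat (d_inner,) norm weight is split into one row
            # per SSM group, i.e. shape (n_group, d_inner // n_group).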

        if name.endswith(".A_log"):
            logger.debug("A_log --> A ==> " + new_name)
            data_torch = -torch.exp(data_torch)

        yield (new_name, data_torch)


@ModelBase.register("JambaForCausalLM")
class JambaModel(TextModel):
    model_arch = gguf.MODEL_ARCH.JAMBA

    def get_vocab_base_pre(self, tokenizer) -> str:
        del tokenizer  # unused

        return "gpt-2"

    def set_vocab(self):
        if (self.dir_model / "tokenizer.model").is_file():
            # Using Jamba's tokenizer.json causes errors on model load
            # (something about "byte not found in vocab"),
            # but there's a working tokenizer.model
            self._set_vocab_sentencepiece()
        else:
            # Some Jamba models only have a tokenizer.json, which works.
            self._set_vocab_gpt2()

    def set_gguf_parameters(self):
        d_model = self.find_hparam(["hidden_size", "mamba_d_model"])
        d_conv = self.find_hparam(["mamba_d_conv"], optional=True) or 4
        d_inner = self.hparams["mamba_expand"] * d_model
        d_state = self.find_hparam(["mamba_d_state"], optional=True) or 16
        # ceiling division
        # ref: https://stackoverflow.com/a/17511341/22827863
        # ref: https://github.com/state-spaces/mamba/blob/ce59daea3a090d011d6476c6e5b97f6d58ddad8b/mamba_ssm/modules/mamba_simple.py#L58
        dt_rank = self.find_hparam(["mamba_dt_rank"], optional=True) or -(d_model // -16)
        rms_norm_eps = self.find_hparam(["layer_norm_epsilon", "rms_norm_eps"], optional=True) or 1e-6
        n_kv_head = self.hparams["num_key_value_heads"]
        attn_offset = self.hparams["attn_layer_offset"]
        attn_period = self.hparams["attn_layer_period"]
        n_kv_vec = [0 for _ in range(attn_offset)] + [
            n_kv_head if (i - attn_offset) % attn_period == 0 else 0 for i in range(attn_offset, self.block_count)
        ]
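        # Added worked example: with attn_offset=4, attn_period=8 and 32 blocks,
        # n_kv_vec has n_kv_head at layers 4, 12, 20, 28 and 0 (pure Mamba
        # layers) everywhere else.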

        self.gguf_writer.add_block_count(self.block_count)
        self.gguf_writer.add_context_length(self.find_hparam(["max_position_embeddings", "n_ctx"]))
        self.gguf_writer.add_embedding_length(d_model)
        self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
        self.gguf_writer.add_head_count(self.hparams["num_attention_heads"])
        self.gguf_writer.add_head_count_kv(n_kv_vec)
        self.gguf_writer.add_ssm_conv_kernel(d_conv)
        self.gguf_writer.add_ssm_inner_size(d_inner)
        self.gguf_writer.add_ssm_state_size(d_state)
        self.gguf_writer.add_ssm_time_step_rank(dt_rank)
        self.gguf_writer.add_layer_norm_rms_eps(rms_norm_eps)
        self.gguf_writer.add_expert_count(self.hparams["num_experts"])
        self.gguf_writer.add_expert_used_count(self.hparams["num_experts_per_tok"])
        self.gguf_writer.add_file_type(self.ftype)

    _experts: list[dict[str, Tensor]] | None = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # Mini-Jamba
        name = name.replace(".moe.", ".feed_forward.")
        if bid is not None:
            moe_offset = self.hparams["expert_layer_offset"]
            moe_period = self.hparams["expert_layer_period"]

            if not (bid >= moe_offset and (bid - moe_offset) % moe_period == 0):
                name = name.replace(".experts.0.", ".")

        # process the experts separately
        if ".feed_forward.experts." in name:
            n_experts = self.hparams["num_experts"]

            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                # merge the experts into a single 3d tensor
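                # Added note: each stacked tensor below has shape
                # (n_experts, out_features, in_features); the per-expert 2D
                # matrices become one 3D tensor under the qwen2moe-style name.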
                for wid in ["down_proj", "gate_proj", "up_proj"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.feed_forward.experts.{xid}.{wid}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)

                    # using the same merged name as qwen2moe
                    merged_name = f"model.layers.{bid}.mlp.experts.{wid}.weight"

                    new_name = self.map_tensor_name(merged_name)

                    yield new_name, data_torch
            return

        new_name = self.map_tensor_name(name)

        if self.match_model_tensor_name(new_name, gguf.MODEL_TENSOR.SSM_CONV1D, bid):
            data_torch = data_torch.squeeze()

        if name.endswith(".A_log"):
            logger.debug("A_log --> A ==> " + new_name)
            data_torch = -torch.exp(data_torch)

        yield (new_name, data_torch)

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")


@ModelBase.register("CohereForCausalLM")
class CommandR2Model(TextModel):
    model_arch = gguf.MODEL_ARCH.COMMAND_R

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # max_position_embeddings = 8192 in config.json but model was actually
        # trained on 128k context length
        # aya-23 models don't have model_max_length specified
        self.hparams["max_position_embeddings"] = self.find_hparam(["model_max_length", "max_position_embeddings"])

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_logit_scale(self.hparams["logit_scale"])
        self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)


@ModelBase.register("Cohere2ForCausalLM")
class Cohere2Model(TextModel):
    model_arch = gguf.MODEL_ARCH.COHERE2

    def set_gguf_parameters(self):
        super().set_gguf_parameters()

        self.gguf_writer.add_logit_scale(self.hparams["logit_scale"])
        self.gguf_writer.add_sliding_window(self.hparams["sliding_window"])
        self.gguf_writer.add_vocab_size(self.hparams["vocab_size"])

        rotary_pct = self.hparams["rotary_pct"]
        hidden_size = self.hparams["hidden_size"]
        num_attention_heads = self.hparams["num_attention_heads"]
        self.gguf_writer.add_rope_dimension_count(int(rotary_pct * (hidden_size // num_attention_heads)))
        self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)


@ModelBase.register("OlmoForCausalLM")
@ModelBase.register("OLMoForCausalLM")
class OlmoModel(TextModel):
    model_arch = gguf.MODEL_ARCH.OLMO

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_layer_norm_eps(1e-5)
        clip_qkv = self.hparams.get("clip_qkv")
        if clip_qkv is not None:
            self.gguf_writer.add_clamp_kqv(clip_qkv)

    # Same as super class, but permuting q_proj, k_proj
    # Copied from: LlamaModel
    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        n_head = self.hparams["num_attention_heads"]
        n_kv_head = self.hparams.get("num_key_value_heads")

        if name.endswith("q_proj.weight"):
            data_torch = LlamaModel.permute(data_torch, n_head, n_head)
        if name.endswith("k_proj.weight"):
            data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)

        return [(self.map_tensor_name(name), data_torch)]


@ModelBase.register("SeedOssForCausalLM")
class SeedOssModel(TextModel):
    model_arch = gguf.MODEL_ARCH.SEED_OSS


@ModelBase.register("Olmo2ForCausalLM")
@ModelBase.register("Olmo3ForCausalLM")
class Olmo2Model(TextModel):
    model_arch = gguf.MODEL_ARCH.OLMO2

    def set_gguf_parameters(self):
        super().set_gguf_parameters()

        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
            self.gguf_writer.add_rope_scaling_attn_factors(rope_scaling["attention_factor"])
            self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"])

        if "sliding_window" in self.hparams:
            self.gguf_writer.add_sliding_window(self.hparams["sliding_window"])

            sliding_window_pattern = []
            if "layer_types" in self.hparams:
                sliding_window_pattern = [t == "sliding_attention" for t in self.hparams["layer_types"]]
            else:
                # Olmo2 does not use sliding window attention.
                # Olmo3 defaults to using sliding window for all layers except every 4th.
                for i in range(self.hparams["num_hidden_layers"]):
                    sliding_window_pattern.append((i + 1) % 4 != 0)
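            # Added worked example: for 16 layers the loop above yields
            # [True, True, True, False] * 4, i.e. full attention only on
            # layers 3, 7, 11 and 15 (0-indexed).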
            self.gguf_writer.add_sliding_window_pattern(sliding_window_pattern)


@ModelBase.register("OlmoeForCausalLM")
class OlmoeModel(TextModel):
    model_arch = gguf.MODEL_ARCH.OLMOE

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_layer_norm_rms_eps(1e-5)
        if (n_experts := self.hparams.get("num_experts")) is not None:
            self.gguf_writer.add_expert_count(n_experts)

    _experts: list[dict[str, Tensor]] | None = None

    # Copied from: Qwen2MoeModel
    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # process the experts separately
        if name.find("experts") != -1:
            n_experts = self.hparams["num_experts"]
            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for w_name in ["down_proj", "gate_proj", "up_proj"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)

                    merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"

                    new_name = self.map_tensor_name(merged_name)

                    tensors.append((new_name, data_torch))
                return tensors
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]

    # Copied from: Qwen2MoeModel
    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")


@ModelBase.register("JinaBertModel", "JinaBertForMaskedLM")
class JinaBertV2Model(BertModel):
    model_arch = gguf.MODEL_ARCH.JINA_BERT_V2

    def set_vocab(self):
        tokenizer_class = 'BertTokenizer'
        with open(self.dir_model / "tokenizer_config.json", "r", encoding="utf-8") as f:
            tokenizer_class = json.load(f)['tokenizer_class']

        if tokenizer_class == 'BertTokenizer':
            super().set_vocab()
        elif tokenizer_class == 'RobertaTokenizer':
            self._set_vocab_gpt2()
            self.gguf_writer.add_token_type_count(2)
        else:
            raise NotImplementedError(f'Tokenizer {tokenizer_class} is not supported for JinaBertModel')


@ModelBase.register("OpenELMForCausalLM")
class OpenELMModel(TextModel):
    model_arch = gguf.MODEL_ARCH.OPENELM

    @staticmethod
    def _make_divisible(v: float | int, divisor: int) -> int:
        # ref: https://huggingface.co/apple/OpenELM-270M-Instruct/blob/eb111ff2e6724348e5b905984063d4064d4bc579/configuration_openelm.py#L34-L38
        new_v = max(divisor, int(v + divisor / 2) // divisor * divisor)
        # Make sure that round down does not go down by more than 10%.
        if new_v < 0.9 * v:
            new_v += divisor
        return new_v
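
    # Added worked example for _make_divisible above: v=1363.2, divisor=256
    # gives int(1363.2 + 128) // 256 * 256 == 1280, and since
    # 1280 >= 0.9 * 1363.2, no correction step is needed.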

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        ffn_multipliers: list[float] = self.hparams["ffn_multipliers"]
        ffn_dim_divisor: int = self.hparams["ffn_dim_divisor"]
        self._n_embd: int = self.hparams["model_dim"]
        self._num_kv_heads: list[int] = self.hparams["num_kv_heads"]
        self._num_query_heads: list[int] = self.hparams["num_query_heads"]
        self._ffn_dims: list[int] = [
            OpenELMModel._make_divisible(multiplier * self._n_embd, ffn_dim_divisor)
            for multiplier in ffn_multipliers
        ]
        assert isinstance(self._num_kv_heads, list) and isinstance(self._num_kv_heads[0], int)
        assert isinstance(self._num_query_heads, list) and isinstance(self._num_query_heads[0], int)

    # Uses the tokenizer from meta-llama/Llama-2-7b-hf
    def set_vocab(self):
        try:
            self._set_vocab_sentencepiece()
        except FileNotFoundError:
            self._set_vocab_builtin("llama-spm", self.hparams["vocab_size"])

    def set_gguf_parameters(self):
        n_embd = self._n_embd
        head_dim = self.hparams["head_dim"]
        rot_pct = 1.0
        assert self.block_count == len(self._num_kv_heads)
        assert self.block_count == len(self._num_query_heads)
        assert self.block_count == len(self._ffn_dims)

        self.gguf_writer.add_block_count(self.block_count)
        self.gguf_writer.add_context_length(self.hparams["max_context_length"])
        self.gguf_writer.add_embedding_length(n_embd)
        self.gguf_writer.add_feed_forward_length(self._ffn_dims)
        self.gguf_writer.add_head_count(self._num_query_heads)
        self.gguf_writer.add_head_count_kv(self._num_kv_heads)
        self.gguf_writer.add_rope_freq_base(self.hparams["rope_freq_constant"])
        # https://huggingface.co/apple/OpenELM-270M-Instruct/blob/c401df2/modeling_openelm.py#L30
        self.gguf_writer.add_layer_norm_rms_eps(1e-6)
        self.gguf_writer.add_rope_dimension_count(int(rot_pct * head_dim))
        self.gguf_writer.add_key_length(head_dim)
        self.gguf_writer.add_value_length(head_dim)
        self.gguf_writer.add_file_type(self.ftype)

    def find_hparam(self, keys: Iterable[str], optional: bool = False) -> Any:
        if "n_layers" in keys:
            return self.hparams["num_transformer_layers"]

        return super().find_hparam(keys, optional)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # split ff
        if bid is not None and name == f"transformer.layers.{bid}.ffn.proj_1.weight":
            ff_dim = self._ffn_dims[bid]
            yield (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE, bid), data_torch[:ff_dim])
            yield (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP, bid), data_torch[ff_dim:])
            return

        yield (self.map_tensor_name(name), data_torch)


@ModelBase.register("ArcticForCausalLM")
class ArcticModel(TextModel):
    model_arch = gguf.MODEL_ARCH.ARCTIC

    def set_vocab(self):
        # The reason for using a custom implementation here is that the
        # snowflake-arctic-instruct model redefined tokens 31998 and 31999 from
        # tokenizer.model and used them as BOS and EOS instead of adding new tokens.
        from sentencepiece import SentencePieceProcessor

        tokenizer_path = self.dir_model / 'tokenizer.model'

        if not tokenizer_path.is_file():
            logger.error(f'Error: Missing {tokenizer_path}')
            sys.exit(1)

        # Read the whole vocabulary from the tokenizer.model file
        tokenizer = SentencePieceProcessor()
        tokenizer.LoadFromFile(str(tokenizer_path))

        vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())

        tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
        scores: list[float] = [-10000.0] * vocab_size
        toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size

        for token_id in range(tokenizer.vocab_size()):
            piece = tokenizer.IdToPiece(token_id)
            text = piece.encode("utf-8")
            score = tokenizer.GetScore(token_id)

            toktype = SentencePieceTokenTypes.NORMAL
            if tokenizer.IsUnknown(token_id):
                toktype = SentencePieceTokenTypes.UNKNOWN
            elif tokenizer.IsControl(token_id):
                toktype = SentencePieceTokenTypes.CONTROL
            elif tokenizer.IsUnused(token_id):
                toktype = SentencePieceTokenTypes.UNUSED
            elif tokenizer.IsByte(token_id):
                toktype = SentencePieceTokenTypes.BYTE

            tokens[token_id] = text
            scores[token_id] = score
            toktypes[token_id] = toktype

        # Use the added_tokens_decoder field from tokenizer_config.json as the source
        # of information about added/redefined tokens and modify them accordingly.
        tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
        if tokenizer_config_file.is_file():
            with open(tokenizer_config_file, "r", encoding="utf-8") as f:
                tokenizer_config_json = json.load(f)

                if "added_tokens_decoder" in tokenizer_config_json:
                    added_tokens_decoder = tokenizer_config_json["added_tokens_decoder"]
                    for token_id, token_json in added_tokens_decoder.items():
                        token_id = int(token_id)
                        if token_id >= vocab_size:
                            logger.debug(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
                            continue

                        token_content = token_json["content"]
                        token_type = SentencePieceTokenTypes.USER_DEFINED
                        token_score = -10000.0

                        # Map unk_token to UNKNOWN, other special tokens to CONTROL
                        # Set the score to 0.0 as in the original tokenizer.model
                        if ("special" in token_json) and token_json["special"]:
                            if token_content == tokenizer_config_json["unk_token"]:
                                token_type = SentencePieceTokenTypes.UNKNOWN
                            else:
                                token_type = SentencePieceTokenTypes.CONTROL
                            token_score = 0.0

                        logger.info(f"Setting added token {token_id} to '{token_content}' (type: {token_type}, score: {token_score:.2f})")
                        tokens[token_id] = token_content.encode("utf-8")
                        toktypes[token_id] = token_type
                        scores[token_id] = token_score

        self.gguf_writer.add_tokenizer_model("llama")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        hparams = self.hparams
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])
        self.gguf_writer.add_rope_dimension_count(hparams["hidden_size"] // hparams["num_attention_heads"])

    _experts: list[dict[str, Tensor]] | None = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        n_head = self.hparams["num_attention_heads"]
        n_kv_head = self.hparams.get("num_key_value_heads")

        if name.endswith("q_proj.weight"):
            data_torch = LlamaModel.permute(data_torch, n_head, n_head)
        if name.endswith("k_proj.weight"):
            data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)

        # process the experts separately
        if name.find("block_sparse_moe.experts") != -1:
            n_experts = self.hparams["num_local_experts"]

            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for wid in ["w1", "w2", "w3"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.{wid}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)

                    merged_name = f"layers.{bid}.feed_forward.experts.{wid}.weight"

                    new_name = self.map_tensor_name(merged_name)

                    tensors.append((new_name, data_torch))
                return tensors
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")


@ModelBase.register("DeepseekForCausalLM")
class DeepseekModel(TextModel):
    model_arch = gguf.MODEL_ARCH.DEEPSEEK

    def set_vocab(self):
        try:
            self._set_vocab_sentencepiece()
        except FileNotFoundError:
            self._set_vocab_gpt2()

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        hparams = self.hparams
        if (rope_dim := hparams.get("head_dim")) is None:
            rope_dim = hparams["hidden_size"] // hparams["num_attention_heads"]

        self.gguf_writer.add_rope_dimension_count(rope_dim)
        self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)
        self.gguf_writer.add_leading_dense_block_count(hparams["first_k_dense_replace"])
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])
        self.gguf_writer.add_expert_feed_forward_length(hparams["moe_intermediate_size"])
        self.gguf_writer.add_expert_weights_scale(1.0)
        self.gguf_writer.add_expert_count(hparams["n_routed_experts"])
        self.gguf_writer.add_expert_shared_count(hparams["n_shared_experts"])

    _experts: list[dict[str, Tensor]] | None = None

    @staticmethod
    def permute(weights: Tensor, n_head: int, n_head_kv: int | None):
        if n_head_kv is not None and n_head != n_head_kv:
            n_head = n_head_kv
        return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
                .swapaxes(1, 2)
                .reshape(weights.shape))
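
    # Added note (assumption, mirroring LlamaModel.permute): this undoes the
    # Hugging Face half-split rotary layout, restoring adjacent (even, odd)
    # rotary pairs in the q/k projection rows as llama.cpp expects.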

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        n_head = self.hparams["num_attention_heads"]
        n_kv_head = self.hparams.get("num_key_value_heads")

        if name.endswith(("q_proj.weight", "q_proj.bias")):
            data_torch = DeepseekModel.permute(data_torch, n_head, n_head)
        if name.endswith(("k_proj.weight", "k_proj.bias")):
            data_torch = DeepseekModel.permute(data_torch, n_head, n_kv_head)

        # process the experts separately
        if name.find("mlp.experts") != -1:
            n_experts = self.hparams["n_routed_experts"]
            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for w_name in ["down_proj", "gate_proj", "up_proj"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)

                    merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"

                    new_name = self.map_tensor_name(merged_name)

                    tensors.append((new_name, data_torch))
                return tensors
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")


@ModelBase.register(
    "DeepseekV2ForCausalLM",
    "DeepseekV3ForCausalLM",
    "KimiVLForConditionalGeneration",
)
class DeepseekV2Model(TextModel):
    model_arch = gguf.MODEL_ARCH.DEEPSEEK2

    def set_vocab(self):
        try:
            self._set_vocab_gpt2()
            return
        except Exception:
            pass

        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(self.dir_model, trust_remote_code=True)
        tokpre = self.get_vocab_base_pre(tokenizer)

        if tokpre == "kimi-k2":
            # Build merges list using the approach similar to HunYuanMoE
            merges = []
            vocab = {}
            mergeable_ranks = tokenizer.model._mergeable_ranks
            for token, rank in mergeable_ranks.items():
                vocab[QwenModel.token_bytes_to_string(token)] = rank
                if len(token) == 1:
                    continue
                merged = QwenModel.bpe(mergeable_ranks, token, max_rank=rank)
                if len(merged) == 2:
                    merges.append(' '.join(map(QwenModel.token_bytes_to_string, merged)))

            # Build token list
            vocab_size = self.hparams["vocab_size"]
            special_tokens = tokenizer.special_tokens
            reverse_vocab = {id_: encoded_tok for encoded_tok, id_ in {**vocab, **special_tokens}.items()}
            tokens: list[str] = []
            toktypes: list[int] = []

            for i in range(vocab_size):
                if i not in reverse_vocab:
                    tokens.append(f"[PAD{i}]")
                    toktypes.append(gguf.TokenType.UNUSED)
                else:
                    token = reverse_vocab[i]
                    tokens.append(token)
                    if i in special_tokens.values():
                        toktypes.append(gguf.TokenType.CONTROL)
                    else:
                        toktypes.append(gguf.TokenType.NORMAL)

            self.gguf_writer.add_tokenizer_model("gpt2")
            self.gguf_writer.add_tokenizer_pre(tokpre)
            self.gguf_writer.add_token_list(tokens)
            self.gguf_writer.add_token_types(toktypes)
            self.gguf_writer.add_token_merges(merges)

            special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=False)
            special_vocab.add_to_gguf(self.gguf_writer)
        else:
            raise NotImplementedError(f"Deepseek pre-tokenizer {tokpre!r} is not supported yet!")

    def set_gguf_parameters(self):
        # note: deepseek2 using MLA converts into MQA (ie: GQA with 1 group)
        self.hparams["num_key_value_heads"] = 1

        super().set_gguf_parameters()
        hparams = self.hparams

        self.gguf_writer.add_leading_dense_block_count(hparams["first_k_dense_replace"])
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])
        if "q_lora_rank" in hparams and hparams["q_lora_rank"] is not None:
            self.gguf_writer.add_q_lora_rank(hparams["q_lora_rank"])
        self.gguf_writer.add_kv_lora_rank(hparams["kv_lora_rank"])

        # note: deepseek2 using MLA converts into MQA with larger heads, then decompresses to MHA
        self.gguf_writer.add_key_length(hparams["kv_lora_rank"] + hparams["qk_rope_head_dim"])
        self.gguf_writer.add_value_length(hparams["kv_lora_rank"])
        self.gguf_writer.add_key_length_mla(hparams["qk_nope_head_dim"] + hparams["qk_rope_head_dim"])
        self.gguf_writer.add_value_length_mla(hparams["v_head_dim"])

        self.gguf_writer.add_expert_feed_forward_length(hparams["moe_intermediate_size"])
        self.gguf_writer.add_expert_count(hparams["n_routed_experts"])
        self.gguf_writer.add_expert_shared_count(hparams["n_shared_experts"])
        self.gguf_writer.add_expert_weights_scale(hparams["routed_scaling_factor"])
        self.gguf_writer.add_expert_weights_norm(hparams["norm_topk_prob"])

        if hparams["scoring_func"] == "sigmoid":
            self.gguf_writer.add_expert_gating_func(gguf.ExpertGatingFuncType.SIGMOID)
        elif hparams["scoring_func"] == "softmax":
            self.gguf_writer.add_expert_gating_func(gguf.ExpertGatingFuncType.SOFTMAX)
        else:
            raise ValueError(f"Unsupported scoring_func value: {hparams['scoring_func']}")

        self.gguf_writer.add_rope_dimension_count(hparams["qk_rope_head_dim"])

        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
            self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"])
            self.gguf_writer.add_rope_scaling_yarn_log_mul(0.1 * rope_scaling["mscale_all_dim"])
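            # Added note (assumption): 0.1 * mscale_all_dim corresponds to
            # DeepSeek's YaRN attention scale mscale = 0.1 * mscale_all_dim *
            # ln(factor) + 1, which the runtime reconstructs from this log
            # multiplier.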

    _experts: list[dict[str, Tensor]] | None = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # skip vision tensors and remove "language_model." for Kimi-VL
        if "vision_tower" in name or "multi_modal_projector" in name:
            return []

        if name.startswith("language_model."):
            name = name.replace("language_model.", "")

        # rename e_score_correction_bias tensors
        if name.endswith("e_score_correction_bias"):
            name = name.replace("e_score_correction_bias", "e_score_correction.bias")

        # skip Multi-Token Prediction (MTP) layers
        block_count = self.hparams["num_hidden_layers"]
        match = re.match(r"model\.layers\.(\d+)", name)
        if match and int(match.group(1)) >= block_count:
            return []

        # process the experts separately
        if name.find("mlp.experts") != -1:
            n_experts = self.hparams["n_routed_experts"]
            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for w_name in ["down_proj", "gate_proj", "up_proj"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)

                    merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"

                    new_name = self.map_tensor_name(merged_name)

                    tensors.append((new_name, data_torch))
                return tensors
            else:
                return []

        # note: MLA with the absorption optimization, needs these two split and k_b_proj transposed
        if name.endswith("kv_b_proj.weight"):
            name_kb = name.replace("kv_b_proj", "k_b_proj")
            name_vb = name.replace("kv_b_proj", "v_b_proj")

            n_head_kv = self.hparams["num_key_value_heads"]
            v_head_dim = self.hparams["v_head_dim"]
            qk_nope_head_dim = self.hparams["qk_nope_head_dim"]

            assert data_torch.shape[0] == n_head_kv * (v_head_dim + qk_nope_head_dim)

            kv_b = data_torch.view(n_head_kv, v_head_dim + qk_nope_head_dim, data_torch.shape[-1])
            k_b, v_b = torch.split(kv_b, [qk_nope_head_dim, v_head_dim], dim=1)
            k_b = k_b.transpose(1, 2)
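            # Added worked example: with DeepSeek-V2-style dims
            # (qk_nope_head_dim=128, v_head_dim=128, kv_lora_rank=512) and
            # n_head_kv=1 after the MLA-to-MQA conversion above, kv_b is
            # (1, 256, 512); k_b becomes (1, 512, 128) after the transpose and
            # v_b stays (1, 128, 512).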
  5359. return [
  5360. (self.map_tensor_name(name_kb), k_b),
  5361. (self.map_tensor_name(name_vb), v_b)
  5362. ]
  5363. return [(self.map_tensor_name(name), data_torch)]
  5364. def prepare_tensors(self):
  5365. super().prepare_tensors()
  5366. if self._experts is not None:
  5367. # flatten `list[dict[str, Tensor]]` into `list[str]`
  5368. experts = [k for d in self._experts for k in d.keys()]
  5369. if len(experts) > 0:
  5370. raise ValueError(f"Unprocessed experts: {experts}")
  5371. @ModelBase.register("Dots1ForCausalLM")
  5372. class Dots1Model(Qwen2MoeModel):
  5373. model_arch = gguf.MODEL_ARCH.DOTS1
  5374. def __init__(self, *args, **kwargs):
  5375. super().__init__(*args, **kwargs)
  5376. self.hparams["num_experts"] = self.hparams["n_routed_experts"]
  5377. def set_gguf_parameters(self):
  5378. super().set_gguf_parameters()
  5379. self.gguf_writer.add_leading_dense_block_count(self.hparams["first_k_dense_replace"])
  5380. self.gguf_writer.add_expert_shared_count(self.hparams["n_shared_experts"])
  5381. self.gguf_writer.add_expert_weights_scale(self.hparams["routed_scaling_factor"])
  5382. self.gguf_writer.add_expert_weights_norm(self.hparams["norm_topk_prob"])
  5383. if self.hparams["scoring_func"] == "noaux_tc":
  5384. self.gguf_writer.add_expert_gating_func(gguf.ExpertGatingFuncType.SIGMOID)
  5385. else:
  5386. raise ValueError(f"Unsupported scoring_func value: {self.hparams['scoring_func']}")
  5387. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None):
  5388. if name.endswith("e_score_correction_bias"):
  5389. name = name.replace("e_score_correction_bias", "e_score_correction.bias")
  5390. if "shared_experts" in name:
  5391. return [(self.map_tensor_name(name), data_torch)]
  5392. return super().modify_tensors(data_torch, name, bid)
  5393. @ModelBase.register("PLMForCausalLM")
  5394. class PLMModel(TextModel):
  5395. model_arch = gguf.MODEL_ARCH.PLM
  5396. def set_vocab(self):
  5397. self._set_vocab_gpt2()
  5398. def set_gguf_parameters(self):
  5399. super().set_gguf_parameters()
  5400. hparams = self.hparams
  5401. self.gguf_writer.add_vocab_size(hparams["vocab_size"])
  5402. self.gguf_writer.add_kv_lora_rank(hparams["kv_lora_rank"])
  5403. self.gguf_writer.add_key_length(hparams["qk_nope_head_dim"] + hparams["qk_rope_head_dim"])
  5404. self.gguf_writer.add_value_length(hparams["v_head_dim"])
  5405. self.gguf_writer.add_rope_dimension_count(hparams["qk_rope_head_dim"])
  5406. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  5407. return [(self.map_tensor_name(name), data_torch)]
  5408. def prepare_tensors(self):
  5409. super().prepare_tensors()
  5410. @ModelBase.register("T5WithLMHeadModel")
  5411. @ModelBase.register("T5ForConditionalGeneration")
  5412. @ModelBase.register("MT5ForConditionalGeneration")
  5413. @ModelBase.register("UMT5ForConditionalGeneration")
  5414. class T5Model(TextModel):
  5415. model_arch = gguf.MODEL_ARCH.T5
  5416. def __init__(self, *args, **kwargs):
  5417. super().__init__(*args, **kwargs)
  5418. self.shared_token_embeddings_found = False
  5419. def set_vocab(self):
  5420. # to avoid TypeError: Descriptors cannot be created directly
  5421. # exception when importing sentencepiece_model_pb2
  5422. os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
  5423. from sentencepiece import SentencePieceProcessor
  5424. from sentencepiece import sentencepiece_model_pb2 as model
  5425. tokenizer_path = self.dir_model / 'tokenizer.model'
  5426. # many older models use spiece.model tokenizer model filename
  5427. if not tokenizer_path.is_file():
  5428. tokenizer_path = self.dir_model / 'spiece.model'
  5429. if not tokenizer_path.is_file():
  5430. raise FileNotFoundError(f"File not found: {tokenizer_path}")
  5431. sentencepiece_model = model.ModelProto() # pyright: ignore[reportAttributeAccessIssue]
  5432. sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read())
  5433. # some models like Pile-T5 family use BPE tokenizer instead of Unigram
  5434. if sentencepiece_model.trainer_spec.model_type == 2: # BPE
  5435. # assure the tokenizer model file name is correct
  5436. assert tokenizer_path.name == 'tokenizer.model'
  5437. return self._set_vocab_sentencepiece()
  5438. else:
  5439. assert sentencepiece_model.trainer_spec.model_type == 1 # UNIGRAM
  5440. add_prefix = sentencepiece_model.normalizer_spec.add_dummy_prefix
  5441. remove_whitespaces = sentencepiece_model.normalizer_spec.remove_extra_whitespaces
  5442. precompiled_charsmap = sentencepiece_model.normalizer_spec.precompiled_charsmap
  5443. tokenizer = SentencePieceProcessor()
  5444. tokenizer.LoadFromFile(str(tokenizer_path))
  5445. vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())
  5446. tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
  5447. scores: list[float] = [-10000.0] * vocab_size
  5448. toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size
  5449. for token_id in range(tokenizer.vocab_size()):
  5450. piece = tokenizer.IdToPiece(token_id)
  5451. text = piece.encode("utf-8")
  5452. score = tokenizer.GetScore(token_id)
  5453. toktype = SentencePieceTokenTypes.NORMAL
  5454. if tokenizer.IsUnknown(token_id):
  5455. toktype = SentencePieceTokenTypes.UNKNOWN
  5456. elif tokenizer.IsControl(token_id):
  5457. toktype = SentencePieceTokenTypes.CONTROL
  5458. elif tokenizer.IsUnused(token_id):
  5459. toktype = SentencePieceTokenTypes.UNUSED
  5460. elif tokenizer.IsByte(token_id):
  5461. toktype = SentencePieceTokenTypes.BYTE
  5462. tokens[token_id] = text
  5463. scores[token_id] = score
  5464. toktypes[token_id] = toktype
  5465. added_tokens_file = self.dir_model / 'added_tokens.json'
  5466. if added_tokens_file.is_file():
  5467. with open(added_tokens_file, "r", encoding="utf-8") as f:
  5468. added_tokens_json = json.load(f)
  5469. for key in added_tokens_json:
  5470. token_id = added_tokens_json[key]
  5471. if token_id >= vocab_size:
  5472. logger.warning(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
  5473. continue
  5474. tokens[token_id] = key.encode("utf-8")
  5475. scores[token_id] = -1000.0
  5476. toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
  5477. if vocab_size > len(tokens):
  5478. pad_count = vocab_size - len(tokens)
  5479. logger.debug(f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]")
  5480. for i in range(1, pad_count + 1):
  5481. tokens.append(bytes(f"[PAD{i}]", encoding="utf-8"))
  5482. scores.append(-1000.0)
  5483. toktypes.append(SentencePieceTokenTypes.UNUSED)
  5484. self.gguf_writer.add_tokenizer_model("t5")
  5485. self.gguf_writer.add_tokenizer_pre("default")
  5486. self.gguf_writer.add_token_list(tokens)
  5487. self.gguf_writer.add_token_scores(scores)
  5488. self.gguf_writer.add_token_types(toktypes)
  5489. self.gguf_writer.add_add_space_prefix(add_prefix)
  5490. self.gguf_writer.add_remove_extra_whitespaces(remove_whitespaces)
  5491. if precompiled_charsmap:
  5492. self.gguf_writer.add_precompiled_charsmap(precompiled_charsmap)
  5493. special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
  5494. special_vocab.add_to_gguf(self.gguf_writer)
  5495. def set_gguf_parameters(self):
  5496. if (n_ctx := self.find_hparam(["n_positions"], optional=True)) is None:
  5497. logger.warning("Couldn't find context length in config.json, assuming default value of 512")
  5498. n_ctx = 512
  5499. self.gguf_writer.add_context_length(n_ctx)
  5500. self.gguf_writer.add_embedding_length(self.hparams["d_model"])
  5501. self.gguf_writer.add_feed_forward_length(self.hparams["d_ff"])
  5502. self.gguf_writer.add_block_count(self.hparams["num_layers"])
  5503. if (dec_n_layer := self.hparams.get("num_decoder_layers")) is not None:
  5504. self.gguf_writer.add_decoder_block_count(dec_n_layer)
  5505. self.gguf_writer.add_head_count(self.hparams["num_heads"])
  5506. self.gguf_writer.add_key_length(self.hparams["d_kv"])
  5507. self.gguf_writer.add_value_length(self.hparams["d_kv"])
  5508. self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
  5509. self.gguf_writer.add_relative_attn_buckets_count(self.hparams["relative_attention_num_buckets"])
  5510. self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"])
  5511. self.gguf_writer.add_decoder_start_token_id(self.hparams["decoder_start_token_id"])
  5512. self.gguf_writer.add_file_type(self.ftype)
  5513. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  5514. del bid # unused
  5515. # T5 based models contain shared token embeddings tensors saved randomly as either "encoder.embed_tokens.weight",
  5516. # "decoder.embed_tokens.weight" or "shared.weight" tensor. In some models there are even multiple of them stored
  5517. # in the safetensors files. We use the first tensor from these three as the token embeddings for both encoder
  5518. # and decoder and ignore the remaining ones.
  5519. if name in ["decoder.embed_tokens.weight", "encoder.embed_tokens.weight", "shared.weight"]:
  5520. if not self.shared_token_embeddings_found:
  5521. name = "shared.weight"
  5522. self.shared_token_embeddings_found = True
  5523. else:
  5524. logger.debug(f"Skipping shared tensor {name!r} in safetensors so that convert can end normally.")
  5525. return []
  5526. return [(self.map_tensor_name(name), data_torch)]


@ModelBase.register("T5EncoderModel")
class T5EncoderModel(TextModel):
    model_arch = gguf.MODEL_ARCH.T5ENCODER

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.shared_token_embeddings_found = False

    def set_vocab(self):
        # to avoid TypeError: Descriptors cannot be created directly
        # exception when importing sentencepiece_model_pb2
        os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
        from sentencepiece import SentencePieceProcessor
        from sentencepiece import sentencepiece_model_pb2 as model

        tokenizer_path = self.dir_model / 'tokenizer.model'

        # many older models use spiece.model tokenizer model filename
        if not tokenizer_path.is_file():
            tokenizer_path = self.dir_model / 'spiece.model'

        if not tokenizer_path.is_file():
            raise FileNotFoundError(f"File not found: {tokenizer_path}")

        sentencepiece_model = model.ModelProto()  # pyright: ignore[reportAttributeAccessIssue]
        sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read())

        # some models like Pile-T5 family use BPE tokenizer instead of Unigram
        if sentencepiece_model.trainer_spec.model_type == 2:  # BPE
            # ensure the tokenizer model file name is correct
            assert tokenizer_path.name == 'tokenizer.model'
            return self._set_vocab_sentencepiece()
        else:
            assert sentencepiece_model.trainer_spec.model_type == 1  # UNIGRAM

        add_prefix = sentencepiece_model.normalizer_spec.add_dummy_prefix
        remove_whitespaces = sentencepiece_model.normalizer_spec.remove_extra_whitespaces
        precompiled_charsmap = sentencepiece_model.normalizer_spec.precompiled_charsmap

        tokenizer = SentencePieceProcessor()
        tokenizer.LoadFromFile(str(tokenizer_path))

        vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())

        tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
        scores: list[float] = [-10000.0] * vocab_size
        toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size

        for token_id in range(tokenizer.vocab_size()):
            piece = tokenizer.IdToPiece(token_id)
            text = piece.encode("utf-8")
            score = tokenizer.GetScore(token_id)

            toktype = SentencePieceTokenTypes.NORMAL
            if tokenizer.IsUnknown(token_id):
                toktype = SentencePieceTokenTypes.UNKNOWN
            elif tokenizer.IsControl(token_id):
                toktype = SentencePieceTokenTypes.CONTROL
            elif tokenizer.IsUnused(token_id):
                toktype = SentencePieceTokenTypes.UNUSED
            elif tokenizer.IsByte(token_id):
                toktype = SentencePieceTokenTypes.BYTE

            tokens[token_id] = text
            scores[token_id] = score
            toktypes[token_id] = toktype

        added_tokens_file = self.dir_model / 'added_tokens.json'
        if added_tokens_file.is_file():
            with open(added_tokens_file, "r", encoding="utf-8") as f:
                added_tokens_json = json.load(f)
                for key in added_tokens_json:
                    token_id = added_tokens_json[key]
                    if token_id >= vocab_size:
                        logger.warning(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
                        continue

                    tokens[token_id] = key.encode("utf-8")
                    scores[token_id] = -1000.0
                    toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED

        if vocab_size > len(tokens):
            pad_count = vocab_size - len(tokens)
            logger.debug(f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]")
            for i in range(1, pad_count + 1):
                tokens.append(bytes(f"[PAD{i}]", encoding="utf-8"))
                scores.append(-1000.0)
                toktypes.append(SentencePieceTokenTypes.UNUSED)

        self.gguf_writer.add_tokenizer_model("t5")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)
        self.gguf_writer.add_add_space_prefix(add_prefix)
        self.gguf_writer.add_remove_extra_whitespaces(remove_whitespaces)
        if precompiled_charsmap:
            self.gguf_writer.add_precompiled_charsmap(precompiled_charsmap)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        if (n_ctx := self.find_hparam(["n_positions"], optional=True)) is None:
            logger.warning("Couldn't find context length in config.json, assuming default value of 512")
            n_ctx = 512
        self.gguf_writer.add_context_length(n_ctx)
        self.gguf_writer.add_embedding_length(self.hparams["d_model"])
        self.gguf_writer.add_feed_forward_length(self.hparams["d_ff"])
        self.gguf_writer.add_block_count(self.hparams["num_layers"])
        self.gguf_writer.add_head_count(self.hparams["num_heads"])
        self.gguf_writer.add_key_length(self.hparams["d_kv"])
        self.gguf_writer.add_value_length(self.hparams["d_kv"])
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_relative_attn_buckets_count(self.hparams["relative_attention_num_buckets"])
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        # T5 based models contain shared token embeddings tensors saved randomly as either "encoder.embed_tokens.weight",
        # "decoder.embed_tokens.weight" or "shared.weight" tensor. In some models there are even multiple of them stored
        # in the safetensors files. We use the first tensor from these three as the token embeddings for both encoder
        # and decoder and ignore the remaining ones.
        if name in ["decoder.embed_tokens.weight", "encoder.embed_tokens.weight", "shared.weight"]:
            if not self.shared_token_embeddings_found:
                name = "shared.weight"
                self.shared_token_embeddings_found = True
            else:
                logger.debug(f"Skipping shared tensor {name!r} in safetensors so that convert can end normally.")
                return []

        return [(self.map_tensor_name(name), data_torch)]


@ModelBase.register("JAISLMHeadModel")
class JaisModel(TextModel):
    model_arch = gguf.MODEL_ARCH.JAIS

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # SwiGLU activation
        assert self.hparams["activation_function"] == "swiglu"
        # ALiBi position embedding
        assert self.hparams["position_embedding_type"] == "alibi"

        # Embeddings scale
        self.embeddings_scale = 1.0
        if 'mup_embeddings_scale' in self.hparams:
            self.embeddings_scale = self.hparams['mup_embeddings_scale']
        elif 'embeddings_scale' in self.hparams:
            self.embeddings_scale = self.hparams['embeddings_scale']
        else:
            assert False

        self.width_scale = 1.0
        if 'mup_output_alpha' in self.hparams:
            assert 'mup_width_scale' in self.hparams
            self.width_scale = self.hparams['mup_output_alpha'] * self.hparams['mup_width_scale']
        elif 'width_scale' in self.hparams:
            self.width_scale = self.hparams['width_scale']
        else:
            assert False

        self.max_alibi_bias = 8.0

    def set_vocab(self):
        self._set_vocab_gpt2()

    def set_gguf_parameters(self):
        self.gguf_writer.add_block_count(self.hparams["n_layer"])
        self.gguf_writer.add_context_length(self.hparams["n_positions"])
        self.gguf_writer.add_embedding_length(self.hparams["n_embd"])
        self.gguf_writer.add_feed_forward_length(self.hparams["n_inner"])
        self.gguf_writer.add_head_count(self.hparams["n_head"])
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        tensors: list[tuple[str, Tensor]] = []

        # we don't need these
        if name.endswith(".attn.bias"):
            return tensors

        if name.endswith("relative_pe.slopes"):
            # Calculate the max ALiBi bias (this is the inverse of the ALiBi calculation).
            # Some other models have max_alibi_bias spelled out explicitly in the hyperparams,
            # but Jais's PyTorch model simply precalculates the slope values and places them
            # in relative_pe.slopes
            n_head_closest_log2 = 2 ** math.floor(math.log2(self.hparams["n_head"]))
            first_val = float(data_torch[0].item())
            self.max_alibi_bias = -round(math.log2(first_val) * n_head_closest_log2)

            return tensors

        if name.endswith((".c_attn.weight", ".c_proj.weight", ".c_fc.weight", ".c_fc2.weight")):
            data_torch = data_torch.transpose(1, 0)

        new_name = self.map_tensor_name(name)

        if new_name == self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD):
            tensors.append((new_name, data_torch * self.embeddings_scale))
        elif new_name == self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT):
            tensors.append((new_name, data_torch * self.width_scale))
        else:
            tensors.append((new_name, data_torch))

        return tensors

    def prepare_tensors(self):
        super().prepare_tensors()
        self.gguf_writer.add_max_alibi_bias(self.max_alibi_bias)
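

# Illustrative sketch of the ALiBi inversion in JaisModel.modify_tensors above,
# using hypothetical numbers (not part of the conversion flow): with n_head = 8
# and max_alibi_bias = 8, the first precomputed slope is 2 ** (-8 / 8) = 0.5,
# and the inversion recovers the original bias.
def _demo_jais_alibi_inversion() -> None:
    n_head = 8
    max_alibi_bias = 8.0
    n_head_closest_log2 = 2 ** math.floor(math.log2(n_head))
    first_slope = 2.0 ** (-max_alibi_bias / n_head_closest_log2)  # 0.5
    recovered = -round(math.log2(first_slope) * n_head_closest_log2)
    assert recovered == max_alibi_bias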


@ModelBase.register("Glm4ForCausalLM", "Glm4vForConditionalGeneration")
class Glm4Model(TextModel):
    model_arch = gguf.MODEL_ARCH.GLM4

    def set_vocab(self):
        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(self.dir_model, trust_remote_code=True)
        tokens, toktypes, tokpre = self.get_vocab_base()
        self.gguf_writer.add_tokenizer_model("gpt2")
        self.gguf_writer.add_tokenizer_pre(tokpre)
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)
        special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=True)
        special_vocab._set_special_token("eos", tokenizer.get_added_vocab()["<|endoftext|>"])
        special_vocab._set_special_token("eot", tokenizer.get_added_vocab()["<|user|>"])
        special_vocab._set_special_token("unk", tokenizer.get_added_vocab()["<|endoftext|>"])
        special_vocab._set_special_token("bos", tokenizer.get_added_vocab()["<|endoftext|>"])
        special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        if (rope_dim := self.hparams.get("head_dim")) is None:
            rope_dim = self.hparams["hidden_size"] // self.hparams["num_attention_heads"]
        self.gguf_writer.add_rope_dimension_count(int(rope_dim * self.hparams.get("partial_rotary_factor", 0.5)))
        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
            self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        if name.startswith("model.visual."):  # ignore visual part of Glm4v
            return []
        elif name.startswith("model.language_model."):
            name = name.replace("language_model.", "")  # for Glm4v
        return super().modify_tensors(data_torch, name, bid)
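

# Quick numeric sketch for the partial-rope computation above (hypothetical
# hparams, not part of the conversion flow): with hidden_size = 4096,
# num_attention_heads = 32 and the default partial_rotary_factor of 0.5,
# the head dim is 128 and only its first 64 dimensions are rotated.
def _demo_glm4_rope_dims() -> None:
    hidden_size, n_head = 4096, 32
    rope_dim = hidden_size // n_head
    assert int(rope_dim * 0.5) == 64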


@ModelBase.register("Glm4MoeForCausalLM")
class Glm4MoeModel(TextModel):
    model_arch = gguf.MODEL_ARCH.GLM4_MOE

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # GLM4_MOE has num_hidden_layers + 1 actual layers (including NextN layer)
        self.block_count = self.hparams["num_hidden_layers"] + self.hparams.get("num_nextn_predict_layers", 0)
        self.tensor_map = gguf.get_tensor_name_map(self.model_arch, self.block_count)

    def set_vocab(self):
        from transformers import AutoTokenizer

        tokenizer = AutoTokenizer.from_pretrained(self.dir_model)
        special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=True)
        tokens, toktypes, tokpre = self.get_vocab_base()
        self.gguf_writer.add_tokenizer_model("gpt2")
        self.gguf_writer.add_tokenizer_pre(tokpre)
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)

        # Special tokens
        # Note: Using <|endoftext|> (151329) for eot causes endless generation
        special_vocab._set_special_token("bos", tokenizer.get_added_vocab()["[gMASK]"])          # 151331
        special_vocab._set_special_token("eot", tokenizer.get_added_vocab()["<|user|>"])         # 151336
        special_vocab._set_special_token("unk", tokenizer.get_added_vocab()["<|endoftext|>"])    # 151329
        special_vocab._set_special_token("eom", tokenizer.get_added_vocab()["<|observation|>"])  # 151338

        # Patch broken chat template
        if isinstance(special_vocab.chat_template, str) and "visible_text(m.content).endswith" in special_vocab.chat_template:
            special_vocab.chat_template = special_vocab.chat_template.replace(
                """{{ visible_text(m.content) }}\n{{- '/nothink' if (enable_thinking is defined and not enable_thinking and not visible_text(m.content).endswith("/nothink")) else '' -}}""",
                """{% set content = visible_text(m.content) %}{{ content }}\n{{- '/nothink' if (enable_thinking is defined and not enable_thinking and not content.endswith("/nothink")) else '' -}}""")

        special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        if (rope_dim := self.hparams.get("head_dim")) is None:
            rope_dim = self.hparams["hidden_size"] // self.hparams["num_attention_heads"]
        self.gguf_writer.add_rope_dimension_count(int(rope_dim * self.hparams.get("partial_rotary_factor", 0.5)))

        # MoE parameters - Use only routed expert count (shared experts handled separately)
        if (n_routed_experts := self.hparams.get("n_routed_experts")) is not None:
            self.gguf_writer.add_expert_count(n_routed_experts)
        if (moe_intermediate_size := self.hparams.get("moe_intermediate_size")) is not None:
            self.gguf_writer.add_expert_feed_forward_length(moe_intermediate_size)
        if (n_shared_experts := self.hparams.get("n_shared_experts")) is not None:
            self.gguf_writer.add_expert_shared_count(n_shared_experts)
        if (first_k_dense_replace := self.hparams.get("first_k_dense_replace")) is not None:
            self.gguf_writer.add_leading_dense_block_count(first_k_dense_replace)

        # Expert gating function (sigmoid for GLM4_MOE)
        self.gguf_writer.add_expert_gating_func(gguf.ExpertGatingFuncType.SIGMOID)

        # Routed scaling factor
        if (routed_scaling_factor := self.hparams.get("routed_scaling_factor")) is not None:
            self.gguf_writer.add_expert_weights_scale(routed_scaling_factor)

        # Normalise topk probabilities
        if (norm_topk_prob := self.hparams.get("norm_topk_prob")) is not None:
            self.gguf_writer.add_expert_weights_norm(norm_topk_prob)

        # NextN/MTP prediction layers
        if (num_nextn_predict_layers := self.hparams.get("num_nextn_predict_layers")) is not None:
            self.gguf_writer.add_nextn_predict_layers(num_nextn_predict_layers)

    _experts: list[dict[str, Tensor]] | None = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        if name.startswith("model.visual."):  # ignore visual part
            return []
        elif name.startswith("model.language_model."):
            name = name.replace("language_model.", "")  # for multimodal variants

        # Handle main token embedding (but not layer-specific NextN embeddings)
        if name == "model.embed_tokens.weight" and ".layers." not in name:
            return [(self.map_tensor_name("token_embd.weight"), data_torch)]

        # Handle routed experts
        if name.find("mlp.experts") != -1:
            n_experts = self.hparams["n_routed_experts"]
            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for w_name in ["down_proj", "gate_proj", "up_proj"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)
                    merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"
                    new_name = self.map_tensor_name(merged_name)
                    tensors.append((new_name, data_torch))
                return tensors
            else:
                return []

        if name.endswith("e_score_correction_bias"):
            name = name.replace("e_score_correction_bias", "e_score_correction.bias")

        new_name = self.map_tensor_name(name)
        return [(new_name, data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()
        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")
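

# Sketch of the expert-merging step in Glm4MoeModel.modify_tensors above
# (hypothetical shapes, not part of the conversion flow): once all per-expert
# matrices of one weight kind are buffered, they are stacked into a single
# 3D tensor indexed by expert id.
def _demo_expert_stacking() -> None:
    n_expert, n_ff, n_embd = 4, 8, 16
    per_expert = [torch.zeros(n_ff, n_embd) for _ in range(n_expert)]
    merged = torch.stack(per_expert, dim=0)
    assert merged.shape == (n_expert, n_ff, n_embd)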


@ModelBase.register("GlmForCausalLM", "ChatGLMModel", "ChatGLMForConditionalGeneration")
class ChatGLMModel(TextModel):
    model_arch = gguf.MODEL_ARCH.CHATGLM

    def set_vocab_chatglm3(self):
        dir_model = self.dir_model
        hparams = self.hparams
        tokens: list[bytes] = []
        toktypes: list[int] = []
        scores: list[float] = []

        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(dir_model, trust_remote_code=True)
        vocab_size = hparams.get("padded_vocab_size", len(tokenizer.get_vocab()))
        assert max(tokenizer.get_vocab().values()) < vocab_size
        role_special_tokens = ["<|system|>", "<|user|>", "<|assistant|>", "<|observation|>"]
        special_tokens = ["[MASK]", "[gMASK]", "[sMASK]", "sop", "eop"] + role_special_tokens
        for token_id in range(vocab_size):
            piece = tokenizer._convert_id_to_token(token_id)
            if token_id == 0:
                piece = "<unk>"
            elif token_id == 1:
                piece = "<bos>"
            elif token_id == 2:
                piece = "<eos>"

            text = piece.encode("utf-8")
            score = 0.0
            # Referencing the tokenizer Python implementation (https://huggingface.co/THUDM/chatglm3-6b/blob/main/tokenization_chatglm.py),
            # a score is only valid when the token id is less than tokenizer.tokenizer.sp_model.vocab_size()
            if len(piece) != 0 and token_id < tokenizer.tokenizer.sp_model.vocab_size():
                score = tokenizer.tokenizer.sp_model.get_score(token_id)

            if token_id >= tokenizer.tokenizer.sp_model.vocab_size():
                if piece in special_tokens:
                    toktype = SentencePieceTokenTypes.CONTROL
                elif len(piece) == 0:
                    text = f"[PAD{token_id}]".encode("utf-8")
                    toktype = SentencePieceTokenTypes.UNUSED
                else:
                    toktype = SentencePieceTokenTypes.USER_DEFINED
                tokens.append(text)
                scores.append(score)
                toktypes.append(toktype)
                continue

            toktype = SentencePieceTokenTypes.NORMAL
            if tokenizer.tokenizer.sp_model.is_unknown(token_id):
                toktype = SentencePieceTokenTypes.UNKNOWN
            elif tokenizer.tokenizer.sp_model.is_control(token_id):
                toktype = SentencePieceTokenTypes.CONTROL
            elif tokenizer.tokenizer.sp_model.is_unused(token_id):
                toktype = SentencePieceTokenTypes.UNUSED
            elif tokenizer.tokenizer.sp_model.is_byte(token_id):
                toktype = SentencePieceTokenTypes.BYTE

            tokens.append(text)
            scores.append(score)
            toktypes.append(toktype)

        self.gguf_writer.add_tokenizer_model("llama")
        # glm3 needs prefix and suffix formatted as:
        # prompt = "[gMASK]sop<|user|>\n" + prompt + "<|assistant|>"
        self.gguf_writer.add_tokenizer_pre("chatglm-spm")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

    @staticmethod
    def token_bytes_to_string(b):
        from transformers.models.gpt2.tokenization_gpt2 import bytes_to_unicode
        byte_encoder = bytes_to_unicode()
        return ''.join([byte_encoder[ord(char)] for char in b.decode('latin-1')])

    @staticmethod
    def bpe(mergeable_ranks: dict[bytes, int], token: bytes, max_rank: int | None = None) -> list[bytes]:
        parts = [bytes([b]) for b in token]
        while True:
            min_idx = None
            min_rank = None
            for i, pair in enumerate(zip(parts[:-1], parts[1:])):
                rank = mergeable_ranks.get(pair[0] + pair[1])
                if rank is not None and (min_rank is None or rank < min_rank):
                    min_idx = i
                    min_rank = rank
            if min_rank is None or (max_rank is not None and min_rank >= max_rank):
                break
            assert min_idx is not None
            parts = parts[:min_idx] + [parts[min_idx] + parts[min_idx + 1]] + parts[min_idx + 2:]
        return parts

    def set_vocab(self):
        if "THUDM/chatglm3-6b" in self.hparams.get("_name_or_path", ""):
            self.set_vocab_chatglm3()
            return

        dir_model = self.dir_model
        hparams = self.hparams
        tokens: list[str] = []
        toktypes: list[int] = []

        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(dir_model, trust_remote_code=True)
        vocab_size = hparams.get("padded_vocab_size", hparams["vocab_size"])
        assert max(tokenizer.get_vocab().values()) < vocab_size

        tokens, toktypes, tokpre = self.get_vocab_base()
        self.gguf_writer.add_tokenizer_model("gpt2")
        self.gguf_writer.add_tokenizer_pre(tokpre)
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)
        special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=True)
        # only add special tokens when they were not already loaded from config.json
        special_vocab._set_special_token("eos", tokenizer.get_added_vocab()["<|endoftext|>"])
        special_vocab._set_special_token("eot", tokenizer.get_added_vocab()["<|user|>"])
        # this one is usually not in config.json anyway
        special_vocab._set_special_token("unk", tokenizer.get_added_vocab()["<|endoftext|>"])
        special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed"))
        n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads"))
        n_head_kv = self.hparams.get("multi_query_group_num", self.hparams.get("num_key_value_heads", n_head))
        self.gguf_writer.add_context_length(self.hparams.get("seq_length", n_embed))
        self.gguf_writer.add_embedding_length(n_embed)
        self.gguf_writer.add_feed_forward_length(self.hparams.get("ffn_hidden_size", self.hparams.get("intermediate_size", 4 * n_embed)))
        self.gguf_writer.add_block_count(self.hparams.get("num_layers", self.hparams["num_hidden_layers"]))
        self.gguf_writer.add_head_count(n_head)
        self.gguf_writer.add_head_count_kv(n_head_kv)
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams.get("layernorm_epsilon", 1e-5))
        self.gguf_writer.add_file_type(self.ftype)
        if "attention_dim" in self.hparams:
            rope_dim = self.hparams["attention_dim"]
        else:
            rope_dim = self.hparams["hidden_size"] // self.hparams["num_attention_heads"]
        self.gguf_writer.add_rope_dimension_count(int(rope_dim * self.hparams.get("partial_rotary_factor", 0.5)))
        self.gguf_writer.add_add_bos_token(False)
        rope_freq = 10000
        if "rope_ratio" in self.hparams:
            rope_freq = rope_freq * self.hparams["rope_ratio"]
        self.gguf_writer.add_rope_freq_base(rope_freq)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused
        if name.endswith(".rotary_pos_emb.inv_freq") or name.startswith("model.vision."):
            return []

        name = name.removeprefix("transformer.")
        return [(self.map_tensor_name(name), data_torch)]
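

# Worked example for ChatGLMModel.bpe above (hypothetical ranks, not part of
# the conversion flow): max_rank bounds which merges may fire, which is how
# callers recover the two pieces that produced a merged token.
def _demo_chatglm_bpe() -> None:
    ranks = {b"a": 0, b"b": 1, b"ab": 2}
    # unrestricted: greedy merging collapses the token completely
    assert ChatGLMModel.bpe(ranks, b"ab") == [b"ab"]
    # max_rank equal to the token's own rank blocks that merge, exposing
    # the two parts it was built from
    assert ChatGLMModel.bpe(ranks, b"ab", max_rank=2) == [b"a", b"b"]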


@ModelBase.register("NemotronForCausalLM")
class NemotronModel(TextModel):
    model_arch = gguf.MODEL_ARCH.NEMOTRON

    def set_vocab(self):
        self._set_vocab_sentencepiece()
        self.gguf_writer.add_pad_token_id(0)
        self.gguf_writer.add_unk_token_id(1)

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        hparams = self.hparams
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])

        f_norm_eps = self.find_hparam(["layer_norm_eps", "layer_norm_epsilon", "norm_epsilon", "norm_eps"])
        self.gguf_writer.add_layer_norm_eps(f_norm_eps)

        # * Partial RoPE
        rot_pct = self.find_hparam(["partial_rotary_factor", "rope_pct", "rope_percent"])
        n_embd = self.find_hparam(["hidden_size", "n_embd"])
        n_head = self.find_hparam(["num_attention_heads", "n_head"])
        self.gguf_writer.add_rope_dimension_count(int(rot_pct * n_embd) // n_head)

        # * RopeScaling for Nemotron
        if "rope_scaling" not in self.hparams or self.hparams["rope_scaling"] is None:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)
        else:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
            # the linear factor lives inside the rope_scaling dict, not at the top level
            self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # * Adding +1 to LayerNorm's weights here to implement layernorm1p w/o changing anything on the GGML engine side
        #   model.layers.{l}.input_layernorm.weight
        #   model.layers.{l}.post_attention_layernorm.weight
        #   model.norm.weight
        if name.endswith("norm.weight"):
            data_torch = data_torch + 1

        return [(self.map_tensor_name(name), data_torch)]
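

# Sketch of why the +1 in NemotronModel.modify_tensors works (hypothetical
# tensors, not part of the conversion flow): layernorm1p scales the normalized
# activations by (w + 1), so baking the +1 into the stored weight lets a stock
# layernorm reproduce it unchanged.
def _demo_layernorm1p_equivalence() -> None:
    x = torch.randn(4, 8)
    w = torch.randn(8)
    out_shifted_weight = torch.nn.functional.layer_norm(x, (8,), weight=w + 1)
    out_layernorm1p = torch.nn.functional.layer_norm(x, (8,)) * (w + 1)
    assert torch.allclose(out_shifted_weight, out_layernorm1p, atol=1e-6)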


@ModelBase.register("ExaoneForCausalLM")
class ExaoneModel(TextModel):
    model_arch = gguf.MODEL_ARCH.EXAONE

    def set_gguf_parameters(self):
        hparams = self.hparams

        assert (hparams["activation_function"] == "silu")

        max_position_embeddings = hparams["max_position_embeddings"]
        embed_dim = hparams["hidden_size"]
        num_heads = hparams["num_attention_heads"]
        num_kv_heads = hparams.get("num_key_value_heads", num_heads)
        layer_norm_eps = hparams["layer_norm_epsilon"]
        intermediate_size = hparams["intermediate_size"] if "intermediate_size" in hparams else 4 * embed_dim
        num_layers = hparams["num_layers"]
        # ignore for now as EXAONE-3.0-7.8B-Instruct attention_dropout is 0.0
        # attention_dropout_rate = hparams["attention_dropout"]
        # ignore for now as EXAONE-3.0-7.8B-Instruct embed_dropout is 0.0
        # embed_dropout_rate = hparams["embed_dropout"]
        self.gguf_writer.add_embedding_length(embed_dim)
        self.gguf_writer.add_head_count(num_heads)
        self.gguf_writer.add_head_count_kv(num_kv_heads)
        self.gguf_writer.add_context_length(max_position_embeddings)
        self.gguf_writer.add_layer_norm_rms_eps(layer_norm_eps)
        self.gguf_writer.add_feed_forward_length(intermediate_size)
        self.gguf_writer.add_block_count(num_layers)
        self.gguf_writer.add_file_type(self.ftype)

        if (rope_theta := self.hparams.get("rope_theta")) is not None:
            self.gguf_writer.add_rope_freq_base(rope_theta)
        rotary_factor = self.find_hparam(["partial_rotary_factor", "rope_pct"], optional=True)
        rotary_factor = rotary_factor if rotary_factor is not None else 1.0
        self.gguf_writer.add_rope_dimension_count(int(rotary_factor * (hparams["hidden_size"] // hparams["num_attention_heads"])))
        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "linear" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])

    def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
        if rope_scaling := self.find_hparam(["rope_scaling"], optional=True):
            if rope_scaling.get("rope_type", '').lower() == "llama3":
                base = self.hparams.get("rope_theta", 10000.0)
                if (dim := self.hparams.get("head_dim")) is None:
                    dim = self.hparams["hidden_size"] // self.hparams["num_attention_heads"]
                freqs = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim))

                factor = rope_scaling.get("factor", 8.0)
                low_freq_factor = rope_scaling.get("low_freq_factor", 1.0)
                high_freq_factor = rope_scaling.get("high_freq_factor", 4.0)
                old_context_len = self.hparams.get("original_max_position_embeddings", 8192)

                low_freq_wavelen = old_context_len / low_freq_factor
                high_freq_wavelen = old_context_len / high_freq_factor
                assert low_freq_wavelen != high_freq_wavelen

                rope_factors = []
                for freq in freqs:
                    wavelen = 2 * math.pi / freq
                    if wavelen < high_freq_wavelen:
                        rope_factors.append(1)
                    elif wavelen > low_freq_wavelen:
                        rope_factors.append(factor)
                    else:
                        smooth = (old_context_len / wavelen - low_freq_factor) / (high_freq_factor - low_freq_factor)
                        rope_factors.append(1 / ((1 - smooth) / factor + smooth))

                yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FREQS), torch.tensor(rope_factors, dtype=torch.float32))


@ModelBase.register("Exaone4ForCausalLM")
class Exaone4Model(TextModel):
    model_arch = gguf.MODEL_ARCH.EXAONE4

    def set_vocab(self):
        tokens, toktypes, tokpre = self.get_vocab_base()
        self.gguf_writer.add_tokenizer_model("gpt2")
        self.gguf_writer.add_tokenizer_pre(tokpre)
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=True)
        special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        hparams = self.hparams
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])

        if hparams.get("sliding_window") is not None:
            self.gguf_writer.add_sliding_window(hparams["sliding_window"])
            if "layer_types" in hparams:
                self.gguf_writer.add_sliding_window_pattern([t == "sliding_attention" for t in hparams["layer_types"]])
            elif "sliding_window_pattern" in hparams:
                sliding_window_pattern = []
                if isinstance(hparams["sliding_window_pattern"], str):  # e.g. LLLG
                    for i in range(hparams["num_hidden_layers"]):
                        sliding_window_pattern.append(hparams["sliding_window_pattern"][i % len(hparams["sliding_window_pattern"])] == "L")
                if isinstance(hparams["sliding_window_pattern"], int):  # e.g. 4
                    for i in range(hparams["num_hidden_layers"]):
                        sliding_window_pattern.append((i + 1) % hparams["sliding_window_pattern"] != 0)
                if len(sliding_window_pattern) == hparams["num_hidden_layers"]:
                    self.gguf_writer.add_sliding_window_pattern(sliding_window_pattern)

        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "linear" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])

    def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
        if rope_scaling := self.find_hparam(["rope_scaling"], optional=True):
            if rope_scaling.get("rope_type", '').lower() == "llama3":
                base = self.hparams.get("rope_theta", 10_000.0)
                if (dim := self.hparams.get("head_dim")) is None:
                    dim = self.hparams["hidden_size"] // self.hparams["num_attention_heads"]
                freqs = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim))

                factor = rope_scaling.get("factor", 16.0)
                low_freq_factor = rope_scaling.get("low_freq_factor", 1.0)
                high_freq_factor = rope_scaling.get("high_freq_factor", 4.0)
                old_context_len = self.hparams.get("original_max_position_embeddings", 8192)

                low_freq_wavelen = old_context_len / low_freq_factor
                high_freq_wavelen = old_context_len / high_freq_factor

                rope_factors = []
                for freq in freqs:
                    wavelen = 2 * math.pi / freq
                    if wavelen < high_freq_wavelen:
                        rope_factors.append(1)
                    elif wavelen > low_freq_wavelen:
                        rope_factors.append(factor)
                    else:
                        smooth = (old_context_len / wavelen - low_freq_factor) / (high_freq_factor - low_freq_factor)
                        rope_factors.append(1 / ((1 - smooth) / factor + smooth))

                yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FREQS), torch.tensor(rope_factors, dtype=torch.float32))
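

# Worked sketch of the llama3 rope-factor rule used by ExaoneModel and
# Exaone4Model above (hypothetical hparams, not part of the conversion flow):
# high-frequency dims keep factor 1, low-frequency dims would get the full
# factor, and the band in between is smoothly interpolated.
def _demo_llama3_rope_factors() -> None:
    base, dim, factor = 10000.0, 8, 8.0
    low_freq_factor, high_freq_factor, old_ctx = 1.0, 4.0, 8192
    freqs = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim))
    factors = []
    for freq in freqs:
        wavelen = 2 * math.pi / float(freq)
        if wavelen < old_ctx / high_freq_factor:
            factors.append(1.0)
        elif wavelen > old_ctx / low_freq_factor:
            factors.append(factor)
        else:
            smooth = (old_ctx / wavelen - low_freq_factor) / (high_freq_factor - low_freq_factor)
            factors.append(1 / ((1 - smooth) / factor + smooth))
    # the three fastest dims stay unscaled; the slowest falls in the smooth band
    assert factors[:3] == [1.0, 1.0, 1.0] and 1.0 < factors[3] < factor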


@ModelBase.register("GraniteForCausalLM")
class GraniteModel(LlamaModel):
    """Conversion for IBM's GraniteForCausalLM"""
    model_arch = gguf.MODEL_ARCH.GRANITE

    def set_gguf_parameters(self):
        """Granite uses standard llama parameters with the following differences:
        - No head_dim support
        - New multiplier params:
          - attention_scale
          - embedding_scale
          - residual_scale
          - logits_scaling
        """
        if head_dim := self.hparams.pop("head_dim", None):
            logger.warning("Ignoring head_dim (%s) from config for Granite", head_dim)
        super().set_gguf_parameters()
        # NOTE: Convert _multiplier params to _scale params for naming
        #   consistency
        if attention_scale := self.hparams.get("attention_multiplier"):
            self.gguf_writer.add_attention_scale(attention_scale)
            logger.info("gguf: (granite) attention_scale = %s", attention_scale)
        if embedding_scale := self.hparams.get("embedding_multiplier"):
            self.gguf_writer.add_embedding_scale(embedding_scale)
            logger.info("gguf: (granite) embedding_scale = %s", embedding_scale)
        if residual_scale := self.hparams.get("residual_multiplier"):
            self.gguf_writer.add_residual_scale(residual_scale)
            logger.info("gguf: (granite) residual_scale = %s", residual_scale)
        if logits_scale := self.hparams.get("logits_scaling"):
            self.gguf_writer.add_logit_scale(logits_scale)
            logger.info("gguf: (granite) logits_scale = %s", logits_scale)


@ModelBase.register("GraniteMoeForCausalLM", "GraniteMoeSharedForCausalLM")
class GraniteMoeModel(GraniteModel):
    """Conversion for IBM's GraniteMoeForCausalLM"""
    model_arch = gguf.MODEL_ARCH.GRANITE_MOE

    def set_gguf_parameters(self):
        """GraniteMoeShared uses GraniteMoe parameters plus the following:
        - shared_intermediate_size
        """
        super().set_gguf_parameters()
        if shared_feed_forward_length := self.hparams.get("shared_intermediate_size"):
            self.gguf_writer.add_expert_shared_feed_forward_length(shared_feed_forward_length)
            logger.info("gguf: (granitemoeshared) shared_feed_forward_length = %s", shared_feed_forward_length)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        """In modeling_granitemoe, the JetMoe implementation of parallel experts
        is used. This essentially merges w1 and w3 into a single tensor with 2x
        the hidden size that is then split during forward. To keep compatibility
        with existing mixtral support, we pull them apart here.
        """
        if name.endswith("block_sparse_moe.input_linear.weight"):
            ffn_dim = self.hparams["intermediate_size"]
            assert data_torch.shape[-2] == 2 * ffn_dim, "Merged FFN tensor size must be 2 * intermediate_size"
            gate, up = data_torch.split(ffn_dim, dim=-2)
            return [
                (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE_EXP, bid), gate),
                (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP_EXP, bid), up),
            ]

        has_experts = bool(self.hparams.get('num_local_experts'))

        if name.endswith("shared_mlp.input_linear.weight"):
            ffn_dim = self.hparams["shared_intermediate_size"]
            assert data_torch.shape[-2] == 2 * ffn_dim, "Merged FFN tensor size must be 2 * shared_intermediate_size"
            gate, up = data_torch.split(ffn_dim, dim=-2)
            if has_experts:
                return [
                    (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE_SHEXP, bid), gate),
                    (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP_SHEXP, bid), up),
                ]
            return [
                (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE, bid), gate),
                (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP, bid), up),
            ]

        if not has_experts and name.endswith("shared_mlp.output_linear.weight"):
            return [
                (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_DOWN, bid), data_torch)
            ]

        return super().modify_tensors(data_torch, name, bid)
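

# Sketch of the merged-FFN split in GraniteMoeModel.modify_tensors above
# (hypothetical shapes, not part of the conversion flow): input_linear stores
# gate and up stacked along the hidden axis, so one split along dim=-2
# recovers the two mixtral-style tensors.
def _demo_granitemoe_gate_up_split() -> None:
    ffn_dim, n_embd = 8, 16
    merged = torch.zeros(2 * ffn_dim, n_embd)
    gate, up = merged.split(ffn_dim, dim=-2)
    assert gate.shape == up.shape == (ffn_dim, n_embd)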


@ModelBase.register("GraniteMoeHybridForCausalLM", "BambaForCausalLM")
class GraniteHybridModel(Mamba2Model, GraniteMoeModel):
    """GraniteHybrid is a hybrid SSM + Attention model that uses Mamba2 SSM
    layers and optionally uses MoE w/ a shared expert"""
    model_arch = gguf.MODEL_ARCH.GRANITE_HYBRID
    undo_permute = True

    def __init__(self, *args, **kwargs):
        # Hybrid mamba models use a prefix for the mamba-specific params.
        # TODO: Extend this if the prefix(es) need to be configurable
        self.hparam_prefixes = ["mamba"]
        super().__init__(*args, **kwargs)

        # Lists of which layers use ssm vs attention
        self._attn_layers = self.get_attn_layers()
        self._ssm_layers = [
            i for i in range(self.block_count)
            if i not in self._attn_layers
        ]

        # n_group and d_inner are used during reshape_tensors for mamba2
        # NOTE: Explicitly include the hparam prefix for d_model to
        #   disambiguate with top-level head_dim
        # NOTE 2: If needed for future models, this can be isolated in a method
        #   to separate the prefix setting and the keys used
        self.d_model = self.find_hparam([f"{self.hparam_prefixes[0]}_head_dim", "hidden_size", "d_model"])
        self.n_group = self.find_hparam(["n_groups", "num_groups"])
        self.d_inner = self.find_hparam(["expand", "num_heads"]) * self.d_model

    def get_attn_layers(self):
        # Explicit list of layer type names
        if layer_types := self.hparams.get("layer_types"):
            return [
                i for i, typ in enumerate(layer_types)
                if typ == "attention"
            ]

        # Layer types indicated by index or period
        attn_layers = self.hparams.get("attn_layer_indices", [])
        if not attn_layers:
            attn_period = self.hparams.get("attn_layer_period")
            assert attn_period, "Didn't find attn_layer_indices or attn_layer_period"
            attn_offset = self.hparams.get("attn_layer_offset")
            assert attn_offset is not None, "No attention layer offset set with attn_layer_period"
            attn_layers = [
                i for i in range(self.block_count)
                if i % attn_period == attn_offset
            ]
        return attn_layers

    def find_hparam(self, keys: Iterable[str], *args, **kwargs) -> Any:
        prefixed = []
        for pfx in self.hparam_prefixes:
            prefixed.extend(
                "_".join([pfx, k])
                for k in keys
            )
        keys = list(keys) + prefixed
        return Mamba2Model.find_hparam(self, keys, *args, **kwargs)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        if (
            name.endswith("block_sparse_moe.input_linear.weight")
            or "shared_mlp" in name
        ):
            return GraniteMoeModel.modify_tensors(self, data_torch, name, bid)

        # Determine whether this is a mamba layer or an attention layer
        if bid in self._ssm_layers:
            return Mamba2Model.modify_tensors(self, data_torch, name, bid)
        elif bid in self._attn_layers:
            return GraniteMoeModel.modify_tensors(self, data_torch, name, bid)
        return [(self.map_tensor_name(name), data_torch)]

    def set_gguf_parameters(self):
        """This method merges params from both parents and some that are
        specific to this model. The result is some duplication of how the params
        get set. The following warnings are expected during conversion:

        WARNING:Duplicated key name 'granitehybrid.attention.head_count_kv'
        WARNING:Duplicated key name 'granitehybrid.context_length'
        """
        GraniteMoeModel.set_gguf_parameters(self)

        ## Mamba mixer params ##
        self.gguf_writer.add_ssm_conv_kernel(self.find_hparam(["conv_kernel", "d_conv"]))
        self.gguf_writer.add_ssm_state_size(self.find_hparam(["state_size", "d_state", "state_dim", "ssm_state_size"]))
        self.gguf_writer.add_ssm_group_count(self.n_group)
        self.gguf_writer.add_ssm_inner_size(self.d_inner)
        # NOTE: The mamba_dt_rank is _not_ the right field for how this is used
        #   in llama.cpp
        self.gguf_writer.add_ssm_time_step_rank(self.find_hparam(["n_heads", "num_heads"]))

        ## Attention params ##
        head_count_kv = self.find_hparam(["num_key_value_heads", "n_head_kv"])
        head_count_kv_vec = [
            head_count_kv if i in self._attn_layers else 0 for i in range(self.block_count)
        ]
        if rope_dim := self.hparams.get("attn_rotary_emb"):
            self.gguf_writer.add_rope_dimension_count(rope_dim)
        self.gguf_writer.add_head_count_kv(head_count_kv_vec)

        ## If Bamba, use rope, otherwise don't
        use_rope = "BambaForCausalLM" in self.hparams["architectures"]
        self.gguf_writer.add_rope_scaling_finetuned(use_rope)
        if not use_rope:
            self.gguf_writer.add_context_length(2**20)

        ## Validation ##
        d_head = self.find_hparam(["d_head"], optional=True) or 64
        assert self.hparams.get("hidden_act") in [None, "silu"], "Only SILU activation supported"
        assert self.d_inner % d_head == 0, f"SSM inner size {self.d_inner} not a multiple of head dim {d_head}"

    def set_vocab(self):
        self.hparams["pad_vocab_size_multiple"] = 8
        Mamba2Model.set_vocab(self)
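

# Quick sketch of GraniteHybridModel.get_attn_layers period/offset selection
# (hypothetical hparams, not part of the conversion flow): with
# attn_layer_period = 4 and attn_layer_offset = 1 in an 8-block model,
# layers 1 and 5 are attention and the rest are SSM.
def _demo_attn_layer_selection() -> None:
    block_count, attn_period, attn_offset = 8, 4, 1
    attn_layers = [i for i in range(block_count) if i % attn_period == attn_offset]
    ssm_layers = [i for i in range(block_count) if i not in attn_layers]
    assert attn_layers == [1, 5] and ssm_layers == [0, 2, 3, 4, 6, 7]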


@ModelBase.register("NemotronHForCausalLM")
class NemotronHModel(GraniteHybridModel):
    """Hybrid mamba2/attention model from NVIDIA"""
    model_arch = gguf.MODEL_ARCH.NEMOTRON_H

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Save the top-level head_dim for later
        self.head_dim = self.hparams.get("head_dim", self.hparams.get("attention_head_dim"))
        assert self.head_dim is not None, "Could not find the attention head dim in config"

        # Don't use expand to calculate d_inner
        self.d_inner = self.find_hparam(["num_heads"]) * self.d_model

        # Update the ssm / attn / mlp layers
        # M: Mamba2, *: Attention, -: MLP
        hybrid_override_pattern = self.hparams["hybrid_override_pattern"]
        self._ssm_layers = [i for i, val in enumerate(hybrid_override_pattern) if val == "M"]
        self._mlp_layers = [i for i, val in enumerate(hybrid_override_pattern) if val == "-"]

    def get_attn_layers(self):
        hybrid_override_pattern = self.hparams["hybrid_override_pattern"]
        assert len(hybrid_override_pattern) == self.block_count, "Mismatch between hybrid override and num_hidden_layers!"
        return [i for i, val in enumerate(hybrid_override_pattern) if val == "*"]

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_key_length(self.head_dim)
        self.gguf_writer.add_value_length(self.head_dim)

        # Set feed_forward_length
        # NOTE: This will trigger an override warning. This is preferable to
        #   duplicating all the parent logic
        n_ff = self.find_hparam(["intermediate_size", "n_inner", "hidden_dim"])
        self.gguf_writer.add_feed_forward_length([
            n_ff if i in self._mlp_layers else 0 for i in range(self.block_count)
        ])

    def set_vocab(self):
        super().set_vocab()

        # The tokenizer _does_ add a BOS token (via post_processor type
        # TemplateProcessing) but does not set add_bos_token to true in the
        # config, so we need to explicitly override it here.
        self.gguf_writer.add_add_bos_token(True)
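

# Sketch of the hybrid_override_pattern parsing in NemotronHModel above
# (hypothetical pattern, not part of the conversion flow): "M" marks Mamba2
# blocks, "*" attention blocks and "-" MLP blocks.
def _demo_hybrid_override_pattern() -> None:
    pattern = "MM*-"
    assert [i for i, v in enumerate(pattern) if v == "M"] == [0, 1]
    assert [i for i, v in enumerate(pattern) if v == "*"] == [2]
    assert [i for i, v in enumerate(pattern) if v == "-"] == [3]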


@ModelBase.register("BailingMoeForCausalLM")
class BailingMoeModel(TextModel):
    model_arch = gguf.MODEL_ARCH.BAILINGMOE

    def set_vocab(self):
        self._set_vocab_gpt2()

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        hparams = self.hparams
        if (rope_dim := hparams.get("head_dim")) is None:
            rope_dim = hparams["hidden_size"] // hparams["num_attention_heads"]

        self.gguf_writer.add_rope_dimension_count(rope_dim)
        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
            self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"])
        else:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)
        self.gguf_writer.add_leading_dense_block_count(hparams["first_k_dense_replace"])
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])
        self.gguf_writer.add_expert_feed_forward_length(hparams["moe_intermediate_size"])
        self.gguf_writer.add_expert_weights_scale(1.0)
        self.gguf_writer.add_expert_count(hparams["num_experts"])
        self.gguf_writer.add_expert_shared_count(hparams["num_shared_experts"])
        self.gguf_writer.add_expert_weights_norm(hparams["norm_topk_prob"])

    _experts: list[dict[str, Tensor]] | None = None

    @staticmethod
    def permute(weights: Tensor, n_head: int, n_head_kv: int | None):
        if n_head_kv is not None and n_head != n_head_kv:
            n_head = n_head_kv
        return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
                .swapaxes(1, 2)
                .reshape(weights.shape))

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        n_head = self.hparams["num_attention_heads"]
        n_kv_head = self.hparams.get("num_key_value_heads")
        n_embd = self.hparams["hidden_size"]
        if (head_dim := self.hparams.get("head_dim")) is None:
            head_dim = n_embd // n_head

        output_name = self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT)

        if name.endswith("attention.dense.weight"):
            return [(self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_OUT, bid), data_torch)]
        elif name.endswith("query_key_value.weight"):
            q, k, v = data_torch.split([n_head * head_dim, n_kv_head * head_dim, n_kv_head * head_dim], dim=-2)

            return [
                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid), BailingMoeModel.permute(q, n_head, n_head)),
                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid), BailingMoeModel.permute(k, n_head, n_kv_head)),
                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid), v)
            ]
        elif name.find("mlp.experts") != -1:
            n_experts = self.hparams["num_experts"]
            assert bid is not None

            tensors: list[tuple[str, Tensor]] = []

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                # merge the experts into a single 3d tensor
                for w_name in ["down_proj", "gate_proj", "up_proj"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)
                    merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"
                    new_name = self.map_tensor_name(merged_name)
                    tensors.append((new_name, data_torch))

            return tensors

        new_name = self.map_tensor_name(name)

        if new_name == output_name and self.hparams.get("norm_head"):
            data_torch = data_torch.float()
            data_torch /= torch.norm(data_torch, p=2, dim=0, keepdim=True) + 1e-7

        return [(new_name, data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")
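

# Shape sketch for BailingMoeModel.permute above (hypothetical sizes, not part
# of the conversion flow): the reshape/swap reorders rows within each head so
# interleaved rope pairs become the split layout llama.cpp expects, without
# changing the overall shape.
def _demo_bailing_permute() -> None:
    n_head, head_dim, n_embd = 2, 4, 8
    w = torch.arange(n_head * head_dim * n_embd, dtype=torch.float32).reshape(n_head * head_dim, n_embd)
    out = BailingMoeModel.permute(w, n_head, n_head)
    assert out.shape == w.shape
    assert torch.equal(out[1], w[2])  # rows 1 and 2 of each head swap places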


@ModelBase.register("ChameleonForConditionalGeneration")
@ModelBase.register("ChameleonForCausalLM")  # obsolete
class ChameleonModel(TextModel):
    model_arch = gguf.MODEL_ARCH.CHAMELEON

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_swin_norm(self.hparams.get("swin_norm", False))

    def set_vocab(self):
        self._set_vocab_gpt2()

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # ignore image tokenizer for now
        # TODO: remove this once image support is implemented for Chameleon
        if name.startswith("model.vqmodel"):
            return []

        n_head = self.hparams["num_attention_heads"]
        n_kv_head = self.hparams.get("num_key_value_heads")
        hidden_dim = self.hparams.get("hidden_size")

        if name.endswith(("q_proj.weight", "q_proj.bias")):
            data_torch = LlamaModel.permute(data_torch, n_head, n_head)
        if name.endswith(("k_proj.weight", "k_proj.bias")):
            data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)
        if name.endswith(("q_norm.weight", "q_norm.bias")):
            data_torch = ChameleonModel._reverse_hf_permute(data_torch, n_head, hidden_dim)
        if name.endswith(("k_norm.weight", "k_norm.bias")):
            data_torch = ChameleonModel._reverse_hf_permute(data_torch, n_kv_head, hidden_dim)

        return [(self.map_tensor_name(name), data_torch)]

    # see: https://github.com/huggingface/transformers/blob/72fb02c47dbbe1999ae105319f24631cad6e2e00/src/transformers/models/chameleon/convert_chameleon_weights_to_hf.py#L176-L203
    @staticmethod
    def _reverse_hf_permute(data_torch, n_heads, hidden_dim):
        head_dim = hidden_dim // n_heads
        data_torch = data_torch[0].view(2, head_dim // 2).t().reshape(1, -1)
        data_torch = data_torch.repeat_interleave(n_heads, 0)
        return data_torch


@ModelBase.register("UltravoxModel")
class UltravoxModel(TextModel):
    model_arch = gguf.MODEL_ARCH.LLAMA  # dummy

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        raise NotImplementedError("Ultravox does not have a text decoder. Instead, it uses Llama or other models for text. If you want to get the audio encoder, please use the --mmproj argument")


@ModelBase.register("Qwen2AudioForConditionalGeneration")
class WhisperEncoderModel(MmprojModel):
    has_vision_encoder = False  # no vision encoder
    has_audio_encoder = True

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if "hidden_size" not in self.hparams and "intermediate_size" not in self.hparams:
            self.hparams["hidden_size"] = self.hparams["d_model"]
            self.hparams["intermediate_size"] = self.hparams["encoder_ffn_dim"]
            self.hparams["num_attention_heads"] = self.hparams["encoder_attention_heads"]

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.QWEN2A)
        self.gguf_writer.add_audio_num_mel_bins(self.hparams["num_mel_bins"])
        self.gguf_writer.add_audio_attention_layernorm_eps(self.hparams.get("layer_norm_eps", 1e-5))

    def tensor_force_quant(self, name, new_name, bid, n_dims):
        if ".conv" in name and ".weight" in name:
            return gguf.GGMLQuantizationType.F16
        return super().tensor_force_quant(name, new_name, bid, n_dims)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        if name.startswith("language_model."):
            # skip language model tensors
            return []

        # prevent clash naming with vision tensors
        if name.startswith("multi_modal_projector"):
            name = "audio." + name

        if "conv1.bias" in name or "conv2.bias" in name:
            # transpose conv1 and conv2 bias
            data_torch = data_torch.unsqueeze(-1)

        return [(self.map_tensor_name(name), data_torch)]


@ModelBase.register("UltravoxModel")
class UltravoxWhisperEncoderModel(WhisperEncoderModel):
    has_vision_encoder = False  # no vision encoder
    has_audio_encoder = True

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.ULTRAVOX)
        self.gguf_writer.add_audio_stack_factor(self.global_config["stack_factor"])


@ModelBase.register("VoxtralForConditionalGeneration")
class VoxtralWhisperEncoderModel(WhisperEncoderModel):
    has_vision_encoder = False  # no vision encoder
    has_audio_encoder = True

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.VOXTRAL)
        self.gguf_writer.add_audio_stack_factor(4)  # == intermediate_size // hidden_size


@ModelBase.register("FalconH1ForCausalLM")
class FalconH1Model(Mamba2Model):
    model_arch = gguf.MODEL_ARCH.FALCON_H1

    def __init__(self, *args, **kwargs):
        # Set the hparam prefixes for Falcon Mamba2
        self.hparam_prefixes = ["mamba"]

        # Initialize the base Mamba2Model
        super().__init__(*args, **kwargs)

        # Use Llama conversion for attention
        self._transformer_model_class = LlamaModel

        # n_group and d_inner are used during reshape_tensors for mamba2
        self.n_group = self.find_hparam(["n_groups"])
        self.d_inner = self.find_hparam(["mamba_d_ssm"])
        self.d_head = self.find_hparam(["d_head"])

        # Initialize any Falcon Mamba2 specific attributes
        self.has_attention = True  # Falcon Mamba2 has attention components

        # Load Falcon-H1 multipliers from hyperparameters
        self.attention_in_multiplier = self.find_hparam(["attention_in_multiplier"], optional=True)
        self.attention_out_multiplier = self.find_hparam(["attention_out_multiplier"], optional=True)
        self.ssm_in_multiplier = self.find_hparam(["ssm_in_multiplier"], optional=True)
        self.ssm_out_multiplier = self.find_hparam(["ssm_out_multiplier"], optional=True)
        self.mlp_multipliers = self.find_hparam(["mlp_multipliers"], optional=True)
        self.ssm_multipliers = self.find_hparam(["ssm_multipliers"], optional=True)
        self.intermediate_size = self.find_hparam(["intermediate_size"])
        self.key_multiplier = self.find_hparam(["key_multiplier"], optional=True)

    def find_hparam(self, keys: Iterable[str], *args, **kwargs) -> Any:
        prefixed = []
        for pfx in self.hparam_prefixes:
            prefixed.extend(
                "_".join([pfx, k])
                for k in keys
            )
        keys = list(keys) + prefixed
        return super().find_hparam(keys, *args, **kwargs)

    def set_vocab(self):
        self._set_vocab_gpt2()

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        tensors = list(super().modify_tensors(data_torch, name, bid))
        tensor = tensors[0][1]

        if "down_proj" in name:
            tensor = tensor * self.mlp_multipliers[1]
        elif "gate_proj" in name:
            tensor = tensor * self.mlp_multipliers[0]
        elif "k_proj" in name:
            tensor = tensor * self.key_multiplier * self.attention_in_multiplier
        elif "q_proj" in name:
            tensor = tensor * self.attention_in_multiplier
        elif "v_proj" in name:
            tensor = tensor * self.attention_in_multiplier
        elif "o_proj" in name:
            tensor = tensor * self.attention_out_multiplier
        elif "out_proj" in name:
            tensor = tensor * self.ssm_out_multiplier
        elif "in_proj" in name:
            tensor = tensor * self.ssm_in_multiplier
            zxbcdt_multipliers = self.hparams["ssm_multipliers"]
            intermediate_size = self.hparams["mamba_d_ssm"]
            groups_time_state_size = self.hparams["mamba_n_groups"] * self.hparams["mamba_d_state"]
            tensor[:intermediate_size, :] *= zxbcdt_multipliers[0]
            tensor[intermediate_size:2 * intermediate_size, :] *= zxbcdt_multipliers[1]
            tensor[2 * intermediate_size:2 * intermediate_size + groups_time_state_size, :] *= zxbcdt_multipliers[2]
            tensor[2 * intermediate_size + groups_time_state_size:2 * intermediate_size + 2 * groups_time_state_size, :] *= zxbcdt_multipliers[3]
            tensor[2 * intermediate_size + 2 * groups_time_state_size:, :] *= zxbcdt_multipliers[4]
        elif "lm_head" in name:
            tensor = tensor * self.hparams["lm_head_multiplier"]
        elif "embed_tokens" in name:
            tensor = tensor * self.hparams["embedding_multiplier"]
        elif "mamba.norm" in name:
            tensor = tensor.reshape(self.n_group, self.d_inner // self.n_group)

        tensors = [(tensors[0][0], tensor)]
        return tensors

    def set_gguf_parameters(self):
        super().set_gguf_parameters()

        ## General Params ##
        self.gguf_writer.add_vocab_size(self.hparams["vocab_size"])
        # Override some Mamba2 defaults
        self.gguf_writer.add_block_count(self.block_count)
        self.gguf_writer.add_context_length(self.hparams.get("max_position_embeddings", 0))
        self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])

        ## Attention params ##
        self.gguf_writer.add_head_count(self.hparams["num_attention_heads"])  # Override value 0 from Mamba2
        self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"])
        self.gguf_writer.add_key_length(self.hparams["head_dim"])
        self.gguf_writer.add_value_length(self.hparams["head_dim"])

        ## Validation ##
        assert self.hparams.get("hidden_act") in [None, "silu"], "Only SILU activation supported"
        assert self.d_inner % self.d_head == 0, f"SSM inner size {self.d_inner} not a multiple of head dim {self.d_head}"

        # Add any other Falcon Mamba2 specific configuration
        self.gguf_writer.add_rope_freq_base(self.find_hparam(["rope_theta"]))


@ModelBase.register("HunYuanMoEV1ForCausalLM")
class HunYuanMoEModel(TextModel):
    model_arch = gguf.MODEL_ARCH.HUNYUAN_MOE

    def set_vocab(self):
        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(self.dir_model, trust_remote_code=True)

        # 1. Get the pre-tokenizer identifier hash
        tokpre = self.get_vocab_base_pre(tokenizer)

        # 2. Reverse-engineer the merges list from mergeable_ranks
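        # Sketch of the idea: mergeable_ranks maps token bytes -> BPE rank.
        # Re-running BPE on a token's bytes while only allowing merges of
        # strictly lower rank splits it back into the exact pair that was
        # merged to create it; that pair is recorded as a merge rule.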
        merges = []
        vocab = {}
        mergeable_ranks = tokenizer.mergeable_ranks
        for token, rank in mergeable_ranks.items():
            vocab[QwenModel.token_bytes_to_string(token)] = rank
            if len(token) == 1:
                continue
            merged = QwenModel.bpe(mergeable_ranks, token, max_rank=rank)
            if len(merged) == 2:  # todo this is an assert in Qwen, why?
                merges.append(' '.join(map(QwenModel.token_bytes_to_string, merged)))

        # 3. Generate the tokens and toktypes lists
        vocab_size = self.hparams["vocab_size"]
        assert tokenizer.vocab_size == vocab_size
        special_tokens = tokenizer.special_tokens
        reverse_vocab = {id_: encoded_tok for encoded_tok, id_ in {**vocab, **special_tokens}.items()}
        tokens: list[str] = []
        toktypes: list[int] = []
        for i in range(vocab_size):
            if i not in reverse_vocab:
                tokens.append(f"[PAD{i}]")
                toktypes.append(gguf.TokenType.UNUSED)
            else:
                token = reverse_vocab[i]
                tokens.append(token)
                if i in special_tokens.values():
                    toktypes.append(gguf.TokenType.CONTROL)
                else:
                    toktypes.append(gguf.TokenType.NORMAL)

        # 4. Write all vocab-related fields to the GGUF writer
        self.gguf_writer.add_tokenizer_model("gpt2")
        self.gguf_writer.add_tokenizer_pre(tokpre)
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)
        self.gguf_writer.add_token_merges(merges)

        # 5. Add special tokens and chat templates
        special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=False)
        special_vocab.add_to_gguf(self.gguf_writer)
        # FIX for BOS token: Overwrite incorrect id read from config.json
        self.gguf_writer.add_bos_token_id(127959)  # <|bos|>

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        hparams = self.hparams

        self.gguf_writer.add_expert_count(hparams["num_experts"])
        self.gguf_writer.add_expert_shared_feed_forward_length(hparams["intermediate_size"])

        moe_intermediate_size = hparams["moe_intermediate_size"]
        assert all(n == moe_intermediate_size[0] for n in moe_intermediate_size)
        self.gguf_writer.add_expert_feed_forward_length(moe_intermediate_size[0])

        moe_topk = hparams["moe_topk"]
        assert all(topk == moe_topk[0] for topk in moe_topk)
        self.gguf_writer.add_expert_used_count(moe_topk[0])

        moe_shared_expert = hparams["num_shared_expert"]
        assert all(n == moe_shared_expert[0] for n in moe_shared_expert)
        self.gguf_writer.add_expert_shared_count(moe_shared_expert[0])

        # Rope
        rope_scaling = hparams.get("rope_scaling", {})
        if rope_scaling.get("type") == "dynamic":
            # HunYuan uses NTK Aware Alpha based scaling. Original implementation:
            # https://www.reddit.com/r/LocalLLaMA/comments/14lz7j5/ntkaware_scaled_rope_allows_llama_models_to_have/
            # 1000 corresponds to a usable context length of 256k
            # (https://github.com/Tencent-Hunyuan/Hunyuan-A13B/blob/main/report/Hunyuan_A13B_Technical_Report.pdf)
            alpha = rope_scaling.get("alpha", 1000)
            base = hparams.get("rope_theta", 10000.0)
            dim = (hparams["hidden_size"] // hparams["num_attention_heads"])  # 128
            scaled_base = base * (alpha ** (dim / (dim - 2)))  # 10000 * (1000 ** (128 / 126)) = 11158839.9251
            self.gguf_writer.add_rope_freq_base(scaled_base)
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)
            self.gguf_writer.add_rope_scaling_factor(1)
            # There is no consistent way to calculate ctx from alpha, and the config is incorrectly set to 32k
            self.gguf_writer.add_rope_scaling_orig_ctx_len(256 * 1024)  # 256k context length
            self.gguf_writer.add_context_length(256 * 1024)  # 256k context length

            # if any of our assumptions about the values are wrong, something has changed and this may need to be updated
            assert alpha == 1000 and base == 10000.0 and dim == 128 and self.hparams["max_position_embeddings"] in [32 * 1024, 256 * 1024], \
                "HunYuan dynamic RoPE scaling assumptions changed, please update the logic or context length manually"

    _experts: list[dict[str, Tensor]] | None = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        if name == "lm_head.weight":
            if self.hparams.get("tie_word_embeddings", False):
                logger.info("Skipping tied output layer 'lm_head.weight'")
                return []

        if name.find("mlp.experts") != -1:
            n_experts = self.hparams["num_experts"]
            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                # merge the experts into a single 3d tensor
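                # e.g. stacking n_experts [out, in] matrices along dim 0 yields
                # one [n_experts, out, in] tensor per projection, which is the
                # layout expected downstream for MoE expert weights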
                tensors: list[tuple[str, Tensor]] = []
                for w_name in ["down_proj", "gate_proj", "up_proj"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)
                    merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"
                    new_name = self.map_tensor_name(merged_name)
                    tensors.append((new_name, data_torch))

                return tensors
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")
  6708. @ModelBase.register("LLaDAMoEModel", "LLaDAMoEModelLM")
  6709. class LLaDAMoEModel(TextModel):
  6710. model_arch = gguf.MODEL_ARCH.LLADA_MOE
  6711. def set_gguf_parameters(self):
  6712. super().set_gguf_parameters()
  6713. if (n_experts := self.hparams.get("num_experts")) is not None:
  6714. self.gguf_writer.add_expert_count(n_experts)
  6715. if (expert_intermediate_size := self.hparams.get("expert_intermediate_size")) is not None:
  6716. self.gguf_writer.add_expert_feed_forward_length(expert_intermediate_size)
  6717. # number of experts used per token (top-k)
  6718. if (n_experts_used := self.hparams.get("num_experts_per_tok")) is not None:
  6719. self.gguf_writer.add_expert_used_count(n_experts_used)
  6720. self.gguf_writer.add_mask_token_id(156895)
  6721. self.gguf_writer.add_causal_attention(False)
  6722. self.gguf_writer.add_diffusion_shift_logits(False)
  6723. _experts: list[dict[str, Tensor]] | None = None
  6724. # Copied from: Qwen2MoeModel
  6725. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  6726. # process the experts separately
  6727. if name.find("experts") != -1:
  6728. n_experts = self.hparams["num_experts"]
  6729. assert bid is not None
  6730. if self._experts is None:
  6731. self._experts = [{} for _ in range(self.block_count)]
  6732. self._experts[bid][name] = data_torch
  6733. if len(self._experts[bid]) >= n_experts * 3:
  6734. tensors: list[tuple[str, Tensor]] = []
  6735. # merge the experts into a single 3d tensor
  6736. for w_name in ["down_proj", "gate_proj", "up_proj"]:
  6737. datas: list[Tensor] = []
  6738. for xid in range(n_experts):
  6739. ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
  6740. datas.append(self._experts[bid][ename])
  6741. del self._experts[bid][ename]
  6742. data_torch = torch.stack(datas, dim=0)
  6743. merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"
  6744. new_name = self.map_tensor_name(merged_name)
  6745. tensors.append((new_name, data_torch))
  6746. return tensors
  6747. else:
  6748. return []
  6749. return [(self.map_tensor_name(name), data_torch)]
  6750. # Copied from: Qwen2MoeModel
  6751. def prepare_tensors(self):
  6752. super().prepare_tensors()
  6753. if self._experts is not None:
  6754. # flatten `list[dict[str, Tensor]]` into `list[str]`
  6755. experts = [k for d in self._experts for k in d.keys()]
  6756. if len(experts) > 0:
  6757. raise ValueError(f"Unprocessed experts: {experts}")
  6758. @ModelBase.register("HunYuanDenseV1ForCausalLM")
  6759. class HunYuanModel(TextModel):
  6760. model_arch = gguf.MODEL_ARCH.HUNYUAN_DENSE
  6761. def set_vocab(self):
  6762. if (self.dir_model / "tokenizer.json").is_file():
  6763. self._set_vocab_gpt2()
  6764. else:
  6765. from transformers import AutoTokenizer
  6766. tokenizer = AutoTokenizer.from_pretrained(self.dir_model, trust_remote_code=True)
  6767. # 1. Get the pre-tokenizer identifier hash
  6768. tokpre = self.get_vocab_base_pre(tokenizer)
  6769. # 2. Reverse-engineer the merges list from mergeable_ranks
  6770. merges = []
  6771. vocab = {}
  6772. mergeable_ranks = tokenizer.mergeable_ranks
  6773. for token, rank in mergeable_ranks.items():
  6774. vocab[QwenModel.token_bytes_to_string(token)] = rank
  6775. if len(token) == 1:
  6776. continue
  6777. merged = QwenModel.bpe(mergeable_ranks, token, max_rank=rank)
  6778. if len(merged) == 2:
  6779. merges.append(' '.join(map(QwenModel.token_bytes_to_string, merged)))
  6780. # 3. Generate the tokens and toktypes lists
  6781. vocab_size = self.hparams["vocab_size"]
  6782. assert tokenizer.vocab_size == vocab_size
  6783. special_tokens = tokenizer.special_tokens
  6784. reverse_vocab = {id_ : encoded_tok for encoded_tok, id_ in {**vocab, **special_tokens}.items()}
  6785. tokens: list[str] = []
  6786. toktypes: list[int] = []
  6787. for i in range(vocab_size):
  6788. if i not in reverse_vocab:
  6789. tokens.append(f"[PAD{i}]")
  6790. toktypes.append(gguf.TokenType.UNUSED)
  6791. else:
  6792. token = reverse_vocab[i]
  6793. tokens.append(token)
  6794. if i in special_tokens.values():
  6795. toktypes.append(gguf.TokenType.CONTROL)
  6796. else:
  6797. toktypes.append(gguf.TokenType.NORMAL)
  6798. # 4. Write all vocab-related fields to the GGUF writer
  6799. self.gguf_writer.add_tokenizer_model("gpt2")
  6800. self.gguf_writer.add_tokenizer_pre(tokpre)
  6801. self.gguf_writer.add_token_list(tokens)
  6802. self.gguf_writer.add_token_types(toktypes)
  6803. self.gguf_writer.add_token_merges(merges)
  6804. # 5. Add special tokens and chat templates
  6805. special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=False)
  6806. special_vocab.add_to_gguf(self.gguf_writer)
  6807. # FIX for BOS token: Overwrite incorrect id read from config.json
  6808. if self.hparams['hidden_size'] == 4096:
  6809. self.gguf_writer.add_bos_token_id(127958) # only for 7b dense, fix <|bos|> token
  6810. def set_gguf_parameters(self):
  6811. super().set_gguf_parameters()
  6812. hparams = self.hparams
  6813. # Rope
  6814. rope_scaling = hparams.get("rope_scaling", {})
  6815. if rope_scaling.get("type") == "dynamic":
  6816. # HunYuan uses NTK Aware Alpha based scaling. Original implementation: https://www.reddit.com/r/LocalLLaMA/comments/14lz7j5/ntkaware_scaled_rope_allows_llama_models_to_have/
  6817. # 1000 corresponds to a usable context length of 256k (https://github.com/Tencent-Hunyuan/Hunyuan-A13B/blob/main/report/Hunyuan_A13B_Technical_Report.pdf)
  6818. alpha = rope_scaling.get("alpha", 50)
  6819. base = hparams.get("rope_theta", 10000.0)
  6820. dim = hparams["head_dim"]
  6821. scaled_base = base * (alpha ** (dim / (dim - 2)))
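            # worked example, assuming head_dim == 128 and the default alpha:
            # 10000 * 50 ** (128 / 126) ~= 5.3e5 as the effective RoPE base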
            self.gguf_writer.add_rope_freq_base(scaled_base)
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)
            self.gguf_writer.add_rope_scaling_factor(1)
            # There is no consistent way to calculate ctx from alpha, and the config is incorrectly set to 32k
            self.gguf_writer.add_rope_scaling_orig_ctx_len(256 * 1024)  # 256k context length
            self.gguf_writer.add_context_length(256 * 1024)  # 256k context length

            # if any of our assumptions about the values are wrong, something has changed and this may need to be updated
            assert base == 10000.0 and self.hparams["max_position_embeddings"] in [32 * 1024, 256 * 1024], \
                "HunYuan dynamic RoPE scaling assumptions changed, please update the logic or context length manually"

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        if name == "lm_head.weight":
            if self.hparams.get("tie_word_embeddings", False):
                logger.info("Skipping tied output layer 'lm_head.weight'")
                return []

        return [(self.map_tensor_name(name), data_torch)]
  6837. @ModelBase.register("SmolLM3ForCausalLM")
  6838. class SmolLM3Model(LlamaModel):
  6839. model_arch = gguf.MODEL_ARCH.SMOLLM3
  6840. def set_vocab(self):
  6841. super().set_vocab()
  6842. # remove unsupported array slicing in chat template
  6843. # ref: https://huggingface.co/ggml-org/SmolLM3-3B-GGUF/discussions/1
  6844. from transformers import AutoTokenizer
  6845. tokenizer = AutoTokenizer.from_pretrained(self.dir_model)
  6846. if tokenizer.chat_template is not None:
  6847. chat_template = tokenizer.chat_template.replace("[:]", "")
  6848. self.gguf_writer.add_chat_template(chat_template)
  6849. @ModelBase.register("GptOssForCausalLM")
  6850. class GptOssModel(TextModel):
  6851. model_arch = gguf.MODEL_ARCH.GPT_OSS
  6852. def transform_nibble_layout(self, tensor):
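        # Reorder packed FP4 nibbles from the HF layout to the ggml MXFP4
        # layout. A reading of the bit ops below (a sketch, not authoritative):
        # each 16-byte block holds 32 4-bit values; the block's first and
        # second 8-byte halves are interleaved so consecutive values end up
        # split across the low/high nibble positions ggml expects.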
        assert tensor.dtype == torch.uint8
        assert tensor.shape[-1] == 16

        # swap nibbles
        t_lo = tensor & 0x0F
        t_hi = tensor & 0xF0
        t_swapped = (t_lo << 4) | (t_hi >> 4)
        tensor = t_swapped

        # transform aaaa...bbbb... to abababab...
        blk_a, blk_b = tensor.chunk(2, dim=-1)
        # get a_
        blk_a0 = (blk_a & 0xF0).view(-1, 1)
        blk_a1 = (blk_a << 4).view(-1, 1)
        blk_a = torch.stack((blk_a0, blk_a1), dim=2).view(tensor.shape)
        # get _b
        blk_b0 = (blk_b >> 4).view(-1, 1)
        blk_b1 = (blk_b & 0x0F).view(-1, 1)
        blk_b = torch.stack((blk_b0, blk_b1), dim=2).view(tensor.shape)
        # swap once more
        out = blk_a | blk_b
        out_h = out & 0xF0
        out_l = out & 0x0F
        out = (out_h >> 4) | (out_l << 4)
        return out

    def repack_mxfp4(self, new_name: str, blocks: Tensor, scales: Tensor):
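        # Repack HF blocks+scales into ggml's MXFP4 block layout: per block of
        # 32 elements, one shared scale byte (E8M0 in the MX spec) followed by
        # 16 bytes of packed nibbles; hence the `* 32` when reporting the
        # logical element count below.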
        assert blocks.dtype == torch.uint8
        assert scales.dtype == torch.uint8
        scales = scales.unsqueeze(-1)
        assert len(blocks.shape) == 4
        assert len(scales.shape) == 4
        blocks = self.transform_nibble_layout(blocks)
        new_data = torch.concat((scales, blocks), dim=-1)
        new_shape = [new_data.shape[0], new_data.shape[1], new_data.shape[2] * 32]
        logger.info(f"Repacked {new_name} with shape {new_shape} and quantization MXFP4")
        # flatten last dim
        new_data = new_data.view(new_data.shape[0], new_data.shape[1], new_data.shape[2] * new_data.shape[3])
        new_data = new_data.numpy()
        self.gguf_writer.add_tensor(new_name, new_data, raw_dtype=gguf.GGMLQuantizationType.MXFP4)

    def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
        blocks0: Tensor = torch.zeros(1)
        blocks1: Tensor = torch.zeros(1)
        # we assume that tensors are loaded in the correct order
        for name, data_torch in self.get_tensors():
            if "mlp.experts.down_proj_blocks" in name:
                blocks0 = data_torch
            elif "mlp.experts.down_proj_scales" in name:
                new_name = self.map_tensor_name(name.replace("_scales", ".weight"))
                self.repack_mxfp4(new_name, blocks0, data_torch)
            elif "mlp.experts.gate_up_proj_blocks" in name:
                # gate and up rows are interleaved in the fused tensor; de-interleave along dim 1
                blocks0, blocks1 = data_torch[:, ::2, :, :], data_torch[:, 1::2, :, :]
            elif "mlp.experts.gate_up_proj_scales" in name:
                scales0, scales1 = data_torch[:, ::2, :], data_torch[:, 1::2, :]
                new_name_gate = self.map_tensor_name(name.replace("gate_up_proj_scales", "gate_proj.weight"))
                new_name_up = self.map_tensor_name(name.replace("gate_up_proj_scales", "up_proj.weight"))
                self.repack_mxfp4(new_name_gate, blocks0, scales0)
                self.repack_mxfp4(new_name_up, blocks1, scales1)
        return []

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused
        if "sinks" in name:
            name += ".weight"

        # correct naming for down_proj
        if "down_proj" in name:
            if name.endswith("_bias"):
                name = name.replace("down_proj_bias", "down_proj.bias")
            elif "_blocks" not in name and "_scales" not in name:
                logger.warning(f"{name} is not in MXFP4, performance may be degraded")
                name = name.replace("down_proj", "down_proj.weight")
                data_torch = data_torch.transpose(-1, -2)
            else:
                # otherwise, it should already be repacked to ggml MXFP4 format
                return []

        # split the gate_up into gate and up
        if "gate_up_proj" in name:
            if name.endswith("_bias"):
                name_up = name.replace("gate_up_proj_bias", "up_proj.bias")
                name_gate = name.replace("gate_up_proj_bias", "gate_proj.bias")
                gate_proj_bias, up_proj_bias = data_torch[..., ::2], data_torch[..., 1::2]
                return [
                    (self.map_tensor_name(name_gate), gate_proj_bias),
                    (self.map_tensor_name(name_up), up_proj_bias)
                ]
            elif "_blocks" not in name and "_scales" not in name:
                logger.warning(f"{name} is not in MXFP4, performance may be degraded")
                name_up = name.replace("gate_up_proj", "up_proj.weight")
                name_gate = name.replace("gate_up_proj", "gate_proj.weight")
                data_torch = data_torch.transpose(-1, -2)
                gate_proj_weight, up_proj_weight = data_torch[:, ::2, :], data_torch[:, 1::2, :]
                return [
                    (self.map_tensor_name(name_gate), gate_proj_weight),
                    (self.map_tensor_name(name_up), up_proj_weight)
                ]
            else:
                # otherwise, it should already be repacked to ggml MXFP4 format
                return []

        return [(self.map_tensor_name(name), data_torch)]

    def set_vocab(self):
        self._set_vocab_gpt2()

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_sliding_window(self.hparams["sliding_window"])
        self.gguf_writer.add_expert_feed_forward_length(self.hparams["intermediate_size"])

        rope_scaling = self.hparams.get("rope_scaling") or {}
        rope_type = rope_scaling.get("rope_type", rope_scaling.get("type"))
        assert rope_type == "yarn", f"GPT-OSS only supports yarn rope scaling, got {rope_type}"
        self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
        self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
        self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling.get("original_max_position_embeddings", 4096))
  6960. @ModelBase.register("Lfm2ForCausalLM", "LFM2ForCausalLM")
  6961. class LFM2Model(TextModel):
  6962. model_arch = gguf.MODEL_ARCH.LFM2
  6963. def _add_feed_forward_length(self):
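        # LLaMA-style FFN sizing: shrink to 2/3, apply the optional multiplier,
        # then round up to a multiple of `multiple_of`. A worked example under
        # assumed values block_ff_dim=12288, multiplier=None, multiple_of=256:
        # int(2 * 12288 / 3) = 8192, already a multiple of 256 -> 8192.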
        ff_dim = self.hparams["block_ff_dim"]
        auto_adjust_ff_dim = self.hparams["block_auto_adjust_ff_dim"]
        ffn_dim_multiplier = self.hparams["block_ffn_dim_multiplier"]
        multiple_of = self.hparams["block_multiple_of"]

        if auto_adjust_ff_dim:
            ff_dim = int(2 * ff_dim / 3)
            # custom dim factor multiplier
            if ffn_dim_multiplier is not None:
                ff_dim = int(ffn_dim_multiplier * ff_dim)
            ff_dim = multiple_of * ((ff_dim + multiple_of - 1) // multiple_of)

        self.gguf_writer.add_feed_forward_length(ff_dim)

    def set_gguf_parameters(self):
        # set num_key_value_heads only for attention layers
        self.hparams["num_key_value_heads"] = [
            self.hparams["num_key_value_heads"] if layer_type == "full_attention" else 0
            for layer_type in self.hparams["layer_types"]
        ]

        super().set_gguf_parameters()
        self.gguf_writer.add_vocab_size(self.hparams["vocab_size"])
        self.gguf_writer.add_shortconv_l_cache(self.hparams["conv_L_cache"])
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["norm_eps"])
        self._add_feed_forward_length()

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        is_vision_tensor = "vision_tower" in name or "multi_modal_projector" in name
        if is_vision_tensor:
            # skip vision tensors
            return []

        name = name.replace("language_model.", "")

        # conv op requires 2d tensor
        if 'conv.conv' in name:
            data_torch = data_torch.squeeze(1)

        return [(self.map_tensor_name(name), data_torch)]
  6997. @ModelBase.register("Lfm2VlForConditionalGeneration")
  6998. class LFM2VLModel(MmprojModel):
  6999. def __init__(self, *args, **kwargs):
  7000. super().__init__(*args, **kwargs)
  7001. assert self.hparams_vision is not None
  7002. # TODO(tarek): for dynamic resolution image_size is not specified, setting here for compatibility
  7003. self.hparams_vision["image_size"] = 256
  7004. def set_gguf_parameters(self):
  7005. super().set_gguf_parameters()
  7006. self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.LFM2)
  7007. self.gguf_writer.add_vision_attention_layernorm_eps(self.find_vparam(["layer_norm_eps"]))
  7008. self.gguf_writer.add_vision_projector_scale_factor(self.global_config.get("downsample_factor", 2))
  7009. self.gguf_writer.add_vision_use_gelu(True)
  7010. # python notation, e.g. for vision_feature_layer == -1, we pick last layer -> vision_feature_layers_to_drop = 0
  7011. vision_feature_layers_to_drop = -(self.global_config.get("vision_feature_layer", -1) + 1)
  7012. self.gguf_writer.add_vision_block_count(self.find_vparam(self.n_block_keys) - vision_feature_layers_to_drop)
  7013. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  7014. del bid # unused
  7015. is_vision_tensor = "vision_tower" in name or "multi_modal_projector" in name
  7016. if is_vision_tensor:
  7017. # remove "model." prefix
  7018. name = name.replace("model.vision_tower.", "vision_tower.")
  7019. name = name.replace("model.multi_modal_projector.", "multi_modal_projector.")
  7020. if "patch_embedding.weight" in name:
  7021. data_torch = data_torch.view(data_torch.shape[0], 16, 16, 3).permute(0, 3, 1, 2)
  7022. return [(self.map_tensor_name(name), data_torch)]
  7023. return [] # skip other tensors
  7024. @ModelBase.register("SmallThinkerForCausalLM")
  7025. class SmallThinkerModel(TextModel):
  7026. model_arch = gguf.MODEL_ARCH.SMALLTHINKER
  7027. def set_gguf_parameters(self):
  7028. super().set_gguf_parameters()
  7029. if (n_experts := self.hparams.get("num_experts", self.hparams.get("moe_num_primary_experts"))) is not None:
  7030. self.gguf_writer.add_expert_count(n_experts)
  7031. if (n_experts_used := self.hparams.get("num_experts_per_tok", self.hparams.get("moe_num_active_primary_experts"))) is not None:
  7032. self.gguf_writer.add_expert_used_count(n_experts_used)
  7033. if (moe_intermediate_size := self.hparams.get("moe_ffn_hidden_size")) is not None:
  7034. self.gguf_writer.add_expert_feed_forward_length(moe_intermediate_size)
  7035. self.gguf_writer.add_feed_forward_length(moe_intermediate_size)
  7036. logger.info(f"gguf: expert feed forward length = {moe_intermediate_size}")
  7037. if (self.hparams.get('moe_primary_router_apply_softmax')):
  7038. self.gguf_writer.add_expert_gating_func(gguf.ExpertGatingFuncType.SOFTMAX)
  7039. else:
  7040. self.gguf_writer.add_expert_gating_func(gguf.ExpertGatingFuncType.SIGMOID)
  7041. # YaRN is not enabled by default
  7042. # To enable it, please refer to this guide: https://huggingface.co/Qwen/Qwen3-30B-A3B#processing-long-texts
  7043. rope_scaling = self.hparams.get("rope_scaling") or {}
  7044. if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling:
  7045. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
  7046. self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
  7047. self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"])
  7048. sliding_window_layout = self.hparams.get("sliding_window_layout")
  7049. if sliding_window_layout:
  7050. for i in sliding_window_layout:
  7051. if i != 0:
  7052. sliding_window = self.hparams.get("sliding_window_size")
  7053. if sliding_window:
  7054. self.gguf_writer.add_sliding_window(sliding_window)
  7055. break

    _experts: list[dict[str, Tensor]] | None = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # process the experts separately
        if name.find("experts") != -1:
            n_experts = self.hparams.get("num_experts", self.hparams.get("moe_num_primary_experts"))
            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for w_name in ["down", "gate", "up"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.{w_name}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)
                    merged_name = f"model.layers.{bid}.block_sparse_moe.experts.{w_name}.weight"
                    new_name = self.map_tensor_name(merged_name)
                    tensors.append((new_name, data_torch))

                return tensors
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")


class MistralModel(LlamaModel):
    model_arch = gguf.MODEL_ARCH.LLAMA
    model_name = "Mistral"
    hf_arch = ""
    is_mistral_format = True
    undo_permute = False

    @staticmethod
    def get_community_chat_template(vocab: MistralVocab, templates_dir: Path, is_mistral_format: bool):
        assert TokenizerVersion is not None, "mistral_common is not installed"
        assert isinstance(vocab.tokenizer, (Tekkenizer, SentencePieceTokenizer)), (
            f"Expected Tekkenizer or SentencePieceTokenizer, got {type(vocab.tokenizer)}"
        )

        if vocab.tokenizer.version == TokenizerVersion.v1:
            return "mistral-v1"
        elif vocab.tokenizer.version == TokenizerVersion.v3 and vocab.tokenizer_type == MistralTokenizerType.spm:
            return "mistral-v3"
        elif vocab.tokenizer.version == TokenizerVersion.v3 and vocab.tokenizer_type == MistralTokenizerType.tekken:
            return "mistral-v3-tekken"
        elif vocab.tokenizer.version == TokenizerVersion.v7 and vocab.tokenizer_type == MistralTokenizerType.spm:
            return "mistral-v7"
        elif vocab.tokenizer.version == TokenizerVersion.v7 and vocab.tokenizer_type == MistralTokenizerType.tekken:
            return "mistral-v7-tekken"
        elif vocab.tokenizer.version == TokenizerVersion.v11:
            template_file = "Mistral-Small-3.2-24B-Instruct-2506.jinja"
        elif vocab.tokenizer.version == TokenizerVersion.v13:
            template_file = "unsloth-mistral-Devstral-Small-2507.jinja"
        else:
            err_message = f"Unknown tokenizer type: {vocab.tokenizer_type} and version {vocab.tokenizer.version}"
            if is_mistral_format:
                err_message += (
                    ". Please pass the --disable-mistral-community-chat-template argument to the CLI "
                    "if you want to skip this error and use the official Mistral `mistral-common` pre-processing library."
                )
            raise ValueError(err_message)

        template_path = templates_dir / template_file
        if not template_path.exists():
            raise FileNotFoundError(f"Template file not found: {template_path}")

        with open(template_path, "r", encoding="utf-8") as f:
            template = f.read()

        return template


class PixtralModel(LlavaVisionModel):
    model_name = "Pixtral"
    hf_arch = ""
    is_mistral_format = True

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.PIXTRAL)

        self.gguf_writer.add_vision_attention_layernorm_eps(
            self.find_hparam(["norm_eps"])
        )
        self.gguf_writer.add_rope_freq_base(self.find_vparam(["rope_theta"]))

        self.gguf_writer.add_vision_use_silu(True)

        # spatial_merge_size
        if self.find_vparam(["mm_projector_id"]) == "patch_merge":
            self.gguf_writer.add_vision_spatial_merge_size(
                self.find_vparam(["spatial_merge_size"])
            )

    def map_tensor_name(self, name: str, try_suffixes: Sequence[str] = (".weight", ".bias")) -> str:
        if name == "vision_language_adapter.w_in.weight":
            return "mm.1.weight"
        elif name == "vision_language_adapter.w_out.weight":
            return "mm.2.weight"
        return super().map_tensor_name(name, try_suffixes)
  7152. @ModelBase.register("KimiVLForConditionalGeneration")
  7153. class KimiVLModel(MmprojModel):
  7154. def __init__(self, *args, **kwargs):
  7155. super().__init__(*args, **kwargs)
  7156. assert self.hparams_vision is not None
  7157. self.hparams_vision["image_size"] = 64 * 14 # for compatibility
  7158. def set_gguf_parameters(self):
  7159. super().set_gguf_parameters()
  7160. self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.KIMIVL)
  7161. self.gguf_writer.add_vision_use_gelu(True)
  7162. self.gguf_writer.add_vision_projector_scale_factor(2)
  7163. # eps is the same as pytorch's default value
  7164. assert self.hparams_vision is not None
  7165. self.gguf_writer.add_vision_attention_layernorm_eps(self.hparams_vision.get("layer_norm_eps", 1e-5))
  7166. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  7167. del bid # unused
  7168. is_vision_tensor = "vision_tower" in name or "multi_modal_projector" in name
  7169. if is_vision_tensor:
  7170. if "pos_emb.weight" in name:
  7171. data_torch = data_torch.view(data_torch.shape[0] * data_torch.shape[1], data_torch.shape[2])
  7172. elif "wqkv" in name:
  7173. split_dim = 0 if "weight" in name else -1
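                # fused QKV: weights stack q/k/v along dim 0 ([3 * dim, in]),
                # biases along the last dim, so chunk(3) on split_dim recovers
                # the three equally-sized projections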
                wq, wk, wv = data_torch.chunk(3, dim=split_dim)
                return [
                    (self.map_tensor_name(name.replace("wqkv", "wq")), wq),
                    (self.map_tensor_name(name.replace("wqkv", "wk")), wk),
                    (self.map_tensor_name(name.replace("wqkv", "wv")), wv)
                ]

            return [(self.map_tensor_name(name), data_torch)]

        return []  # skip other tensors


###### CONVERSION LOGIC ######

# tree of lazy tensors
class LazyTorchTensor(gguf.LazyBase):
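    # A lazy wrapper around torch.Tensor: operations are recorded against
    # zero-cost "meta" tensors and only materialized when the data is actually
    # needed (a summary of the LazyBase mechanics; see gguf-py for details).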
    _tensor_type = torch.Tensor

    # to keep the type-checker happy
    dtype: torch.dtype
    shape: torch.Size

    # only used when converting a torch.Tensor to a np.ndarray
    _dtype_map: dict[torch.dtype, type] = {
        torch.float16: np.float16,
        torch.float32: np.float32,
        torch.uint8: np.uint8,
    }

    # used for safetensors slices
    # ref: https://github.com/huggingface/safetensors/blob/079781fd0dc455ba0fe851e2b4507c33d0c0d407/bindings/python/src/lib.rs#L1046
    # TODO: uncomment U64, U32, and U16, ref: https://github.com/pytorch/pytorch/issues/58734
    _dtype_str_map: dict[str, torch.dtype] = {
        "F64": torch.float64,
        "F32": torch.float32,
        "BF16": torch.bfloat16,
        "F16": torch.float16,
        # "U64": torch.uint64,
        "I64": torch.int64,
        # "U32": torch.uint32,
        "I32": torch.int32,
        # "U16": torch.uint16,
        "I16": torch.int16,
        "U8": torch.uint8,
        "I8": torch.int8,
        "BOOL": torch.bool,
        "F8_E4M3": torch.float8_e4m3fn,
        "F8_E5M2": torch.float8_e5m2,
    }

    def numpy(self) -> gguf.LazyNumpyTensor:
        dtype = self._dtype_map[self.dtype]
        return gguf.LazyNumpyTensor(
            meta=gguf.LazyNumpyTensor.meta_with_dtype_and_shape(dtype, self.shape),
            args=(self,),
            func=(lambda s: s.numpy())
        )

    @classmethod
    def meta_with_dtype_and_shape(cls, dtype: torch.dtype, shape: tuple[int, ...]) -> Tensor:
        return torch.empty(size=shape, dtype=dtype, device="meta")

    @classmethod
    def from_safetensors_slice(cls, st_slice: Any) -> Tensor:
        dtype = cls._dtype_str_map[st_slice.get_dtype()]
        shape: tuple[int, ...] = tuple(st_slice.get_shape())
        lazy = cls(meta=cls.meta_with_dtype_and_shape(dtype, shape), args=(st_slice,), func=lambda s: s[:])
        return cast(torch.Tensor, lazy)

    @classmethod
    def from_remote_tensor(cls, remote_tensor: gguf.utility.RemoteTensor):
        dtype = cls._dtype_str_map[remote_tensor.dtype]
        shape = remote_tensor.shape
        meta = cls.meta_with_dtype_and_shape(dtype, shape)
        lazy = cls(meta=meta, args=(remote_tensor,), func=lambda r: torch.frombuffer(r.data(), dtype=dtype).reshape(shape))
        return cast(torch.Tensor, lazy)

    @classmethod
    def __torch_function__(cls, func, types, args=(), kwargs=None):
        del types  # unused

        if kwargs is None:
            kwargs = {}

        if func is torch.Tensor.numpy:
            return args[0].numpy()

        return cls._wrap_fn(func)(*args, **kwargs)


def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(
        description="Convert a huggingface model to a GGML compatible file")
    parser.add_argument(
        "--vocab-only", action="store_true",
        help="extract only the vocab",
    )
    parser.add_argument(
        "--outfile", type=Path,
        help="path to write to; default: based on input. {ftype} will be replaced by the outtype.",
    )
    parser.add_argument(
        "--outtype", type=str, choices=["f32", "f16", "bf16", "q8_0", "tq1_0", "tq2_0", "auto"], default="f16",
        help="output format - use f32 for float32, f16 for float16, bf16 for bfloat16, q8_0 for Q8_0, tq1_0 or tq2_0 for ternary, and auto for the highest-fidelity 16-bit float type depending on the first loaded tensor type",
    )
    parser.add_argument(
        "--bigendian", action="store_true",
        help="model is executed on a big endian machine",
    )
    parser.add_argument(
        "model", type=str,
        help="directory containing the model files, or huggingface repository ID (if --remote)",
        nargs="?",
    )
    parser.add_argument(
        "--use-temp-file", action="store_true",
        help="use the tempfile library while processing (helpful when running out of memory, process killed)",
    )
    parser.add_argument(
        "--no-lazy", action="store_true",
        help="use more RAM by computing all outputs before writing (use in case lazy evaluation is broken)",
    )
    parser.add_argument(
        "--model-name", type=str, default=None,
        help="name of the model",
    )
    parser.add_argument(
        "--verbose", action="store_true",
        help="increase output verbosity",
    )
    parser.add_argument(
        "--split-max-tensors", type=int, default=0,
        help="max tensors in each split",
    )
    parser.add_argument(
        "--split-max-size", type=str, default="0",
        help="max size per split N(M|G)",
    )
    parser.add_argument(
        "--dry-run", action="store_true",
        help="only print out a split plan and exit, without writing any new files",
    )
    parser.add_argument(
        "--no-tensor-first-split", action="store_true",
        help="do not add tensors to the first split (disabled by default)"
    )
    parser.add_argument(
        "--metadata", type=Path,
        help="Specify the path for an authorship metadata override file"
    )
    parser.add_argument(
        "--print-supported-models", action="store_true",
        help="Print the supported models"
    )
    parser.add_argument(
        "--remote", action="store_true",
        help="(Experimental) Read safetensors files remotely without downloading them to disk. Config and tokenizer files will still be downloaded. To use this feature, you need to specify a Hugging Face model repo name instead of a local directory. For example: 'HuggingFaceTB/SmolLM2-1.7B-Instruct'. Note: to access a gated repo, set the HF_TOKEN environment variable to your Hugging Face token.",
    )
    parser.add_argument(
        "--mmproj", action="store_true",
        help="(Experimental) Export the multimodal projector (mmproj) for vision models. This will only work on some vision models. A prefix 'mmproj-' will be added to the output file name.",
    )
    parser.add_argument(
        "--mistral-format", action="store_true",
        help="Whether the model is stored following the Mistral format.",
    )
    parser.add_argument(
        "--disable-mistral-community-chat-template", action="store_true",
        help=(
            "Whether to disable usage of Mistral community chat templates. If set, the official Mistral `mistral-common` library is used for tokenization and detokenization of Mistral models. "
            "Using `mistral-common` ensures correctness and zero-day support of tokenization for models converted from the Mistral format, but requires manually setting up a tokenization server."
        )
    )

    args = parser.parse_args()
    if not args.print_supported_models and args.model is None:
        parser.error("the following arguments are required: model")
    return args


def split_str_to_n_bytes(split_str: str) -> int:
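    # Parse a human-readable size into bytes using decimal (SI) units,
    # e.g. "250K" -> 250_000, "4G" -> 4_000_000_000, "0" -> 0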
    if split_str.endswith("K"):
        n = int(split_str[:-1]) * 1000
    elif split_str.endswith("M"):
        n = int(split_str[:-1]) * 1000 * 1000
    elif split_str.endswith("G"):
        n = int(split_str[:-1]) * 1000 * 1000 * 1000
    elif split_str.isnumeric():
        n = int(split_str)
    else:
        raise ValueError(f"Invalid split size: {split_str}, must be a number, optionally followed by K, M, or G")

    if n < 0:
        raise ValueError(f"Invalid split size: {split_str}, must be positive")

    return n


def get_model_architecture(hparams: dict[str, Any], model_type: ModelType) -> str:
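    # Resolution order (a summary of the checks below): top-level
    # "architectures", then "ssm_cfg" for non-HF Mamba variants, then the
    # matching sub-config ("text_config" / "vision_config") if it overrides it.
    # e.g. {"architectures": ["LlamaForCausalLM"]} -> "LlamaForCausalLM"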
    # TODO @ngxson : this won't work correctly if the model has both audio & vision encoders
    # maybe we should fallback to text model's arch in that case, since not many models have both
    text_config = hparams.get("text_config", {})
    vision_config = hparams.get("vision_config", {})
    arch = None
    if (arches := hparams.get("architectures")) is not None and len(arches) > 0:
        arch = arches[0]
    elif "ssm_cfg" in hparams:
        # For non-hf Mamba and Mamba2 models
        arch = hparams["ssm_cfg"].get("layer", "Mamba") + "ForCausalLM"

    # if "architectures" is found in the sub-config, use that instead
    if model_type == ModelType.TEXT and text_config.get("architectures") is not None:
        arch = text_config["architectures"][0]
    elif model_type == ModelType.MMPROJ and vision_config.get("architectures") is not None:
        arch = vision_config["architectures"][0]
    if arch is None:
        raise ValueError("Failed to detect model architecture")
    return arch


def main() -> None:
    args = parse_args()

    if args.print_supported_models:
        logger.error("Supported models:")
        ModelBase.print_registered_models()
        sys.exit(0)

    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)

    if args.remote:
        hf_repo_id = args.model
        from huggingface_hub import snapshot_download
        local_dir = snapshot_download(
            repo_id=hf_repo_id,
            allow_patterns=["LICENSE", "*.json", "*.md", "*.txt", "tokenizer.model"])
        dir_model = Path(local_dir)
        logger.info(f"Downloaded config and tokenizer to {local_dir}")
    else:
        hf_repo_id = None
        dir_model = Path(args.model)

    if not dir_model.is_dir():
        logger.error(f'Error: {dir_model} is not a directory')
        sys.exit(1)

    ftype_map: dict[str, gguf.LlamaFileType] = {
        "f32": gguf.LlamaFileType.ALL_F32,
        "f16": gguf.LlamaFileType.MOSTLY_F16,
        "bf16": gguf.LlamaFileType.MOSTLY_BF16,
        "q8_0": gguf.LlamaFileType.MOSTLY_Q8_0,
        "tq1_0": gguf.LlamaFileType.MOSTLY_TQ1_0,
        "tq2_0": gguf.LlamaFileType.MOSTLY_TQ2_0,
        "auto": gguf.LlamaFileType.GUESSED,
    }

    is_split = args.split_max_tensors > 0 or args.split_max_size != "0"
    if args.use_temp_file and is_split:
        logger.error("Error: Cannot use temp file when splitting")
        sys.exit(1)

    if args.outfile is not None:
        fname_out = args.outfile
    elif hf_repo_id:
        # if remote, use the model ID as the output file name
        fname_out = Path("./" + hf_repo_id.replace("/", "-") + "-{ftype}.gguf")
    else:
        fname_out = dir_model

    logger.info(f"Loading model: {dir_model.name}")

    if args.mmproj:
        if "mmproj" not in fname_out.name:
            fname_out = ModelBase.add_prefix_to_filename(fname_out, "mmproj-")

    is_mistral_format = args.mistral_format
    disable_mistral_community_chat_template = args.disable_mistral_community_chat_template

    with torch.inference_mode():
        output_type = ftype_map[args.outtype]
        model_type = ModelType.MMPROJ if args.mmproj else ModelType.TEXT
        hparams = ModelBase.load_hparams(dir_model, is_mistral_format)
        if not is_mistral_format:
            model_architecture = get_model_architecture(hparams, model_type)
            logger.info(f"Model architecture: {model_architecture}")
            try:
                model_class = ModelBase.from_model_architecture(model_architecture, model_type=model_type)
            except NotImplementedError:
                logger.error(f"Model {model_architecture} is not supported")
                sys.exit(1)
        elif args.mmproj:
            assert hparams.get("vision_encoder") is not None, "This model does not support multimodal"
            model_class = PixtralModel
        else:
            model_class = MistralModel

        model_instance = model_class(dir_model, output_type, fname_out,
                                     is_big_endian=args.bigendian, use_temp_file=args.use_temp_file,
                                     eager=args.no_lazy,
                                     metadata_override=args.metadata, model_name=args.model_name,
                                     split_max_tensors=args.split_max_tensors,
                                     split_max_size=split_str_to_n_bytes(args.split_max_size), dry_run=args.dry_run,
                                     small_first_shard=args.no_tensor_first_split,
                                     remote_hf_model_id=hf_repo_id, disable_mistral_community_chat_template=disable_mistral_community_chat_template
                                     )

        if args.vocab_only:
            logger.info("Exporting model vocab...")
            model_instance.write_vocab()
            logger.info(f"Model vocab successfully exported to {model_instance.fname_out}")
        else:
            logger.info("Exporting model...")
            model_instance.write()
            out_path = f"{model_instance.fname_out.parent}{os.sep}" if is_split else model_instance.fname_out
            logger.info(f"Model successfully exported to {out_path}")


if __name__ == '__main__':
    main()