#!/usr/bin/env python3
# -*- coding: utf-8 -*-

from __future__ import annotations

import ast
import logging
import argparse
import contextlib
import json
import os
import re
import sys
from enum import IntEnum
from pathlib import Path
from hashlib import sha256
from typing import TYPE_CHECKING, Any, Callable, ContextManager, Iterable, Iterator, Literal, Sequence, TypeVar, cast
from itertools import chain
from transformers import AutoConfig

import math
import numpy as np
import torch

if TYPE_CHECKING:
    from torch import Tensor

if 'NO_LOCAL_GGUF' not in os.environ:
    sys.path.insert(1, str(Path(__file__).parent / 'gguf-py'))

import gguf
from gguf.vocab import MistralTokenizerType, MistralVocab

from mistral_common.tokens.tokenizers.base import TokenizerVersion
from mistral_common.tokens.tokenizers.multimodal import DATASET_MEAN, DATASET_STD
from mistral_common.tokens.tokenizers.tekken import Tekkenizer
from mistral_common.tokens.tokenizers.sentencepiece import (
    SentencePieceTokenizer,
)

logger = logging.getLogger("hf-to-gguf")

###### MODEL DEFINITIONS ######

class SentencePieceTokenTypes(IntEnum):
    NORMAL = 1
    UNKNOWN = 2
    CONTROL = 3
    USER_DEFINED = 4
    UNUSED = 5
    BYTE = 6


class ModelType(IntEnum):
    TEXT = 1
    MMPROJ = 2


AnyModel = TypeVar("AnyModel", bound="type[ModelBase]")


class ModelBase:
    _model_classes: dict[ModelType, dict[str, type[ModelBase]]] = {
        ModelType.TEXT: {},
        ModelType.MMPROJ: {},
    }

    dir_model: Path
    ftype: gguf.LlamaFileType
    fname_out: Path
    is_big_endian: bool
    endianess: gguf.GGUFEndian
    use_temp_file: bool
    lazy: bool
    dry_run: bool
    part_names: list[str]
    is_safetensors: bool
    hparams: dict[str, Any]
    tensor_names: set[str] | None
    gguf_writer: gguf.GGUFWriter
    model_name: str | None
    metadata_override: Path | None
    dir_model_card: Path
    remote_hf_model_id: str | None

    # subclasses should define this!
    model_arch: gguf.MODEL_ARCH

    # subclasses should initialize this!
    block_count: int
    tensor_map: gguf.TensorNameMap

    # Mistral format specifics
    is_mistral_format: bool = False
    disable_mistral_community_chat_template: bool = False

    def __init__(self, dir_model: Path, ftype: gguf.LlamaFileType, fname_out: Path, *, is_big_endian: bool = False,
                 use_temp_file: bool = False, eager: bool = False,
                 metadata_override: Path | None = None, model_name: str | None = None,
                 split_max_tensors: int = 0, split_max_size: int = 0, dry_run: bool = False,
                 small_first_shard: bool = False, hparams: dict[str, Any] | None = None, remote_hf_model_id: str | None = None,
                 disable_mistral_community_chat_template: bool = False):
        if type(self) is ModelBase or \
                type(self) is TextModel or \
                type(self) is MmprojModel:
            raise TypeError(f"{type(self).__name__!r} should not be directly instantiated")

        self.dir_model = dir_model
        self.ftype = ftype
        self.fname_out = fname_out
        self.is_big_endian = is_big_endian
        self.endianess = gguf.GGUFEndian.BIG if is_big_endian else gguf.GGUFEndian.LITTLE
        self.use_temp_file = use_temp_file
        self.lazy = not eager or (remote_hf_model_id is not None)
        self.dry_run = dry_run
        self.remote_hf_model_id = remote_hf_model_id
        if remote_hf_model_id is not None:
            self.is_safetensors = True

            def get_remote_tensors() -> Iterator[tuple[str, Tensor]]:
                logger.info(f"Using remote model with HuggingFace id: {remote_hf_model_id}")
                remote_tensors = gguf.utility.SafetensorRemote.get_list_tensors_hf_model(remote_hf_model_id)
                self.tensor_names = set(name for name in remote_tensors.keys())
                for name, remote_tensor in remote_tensors.items():
                    yield (name, LazyTorchTensor.from_remote_tensor(remote_tensor))

            self.get_tensors = get_remote_tensors
        else:
            prefix = "model" if not self.is_mistral_format else "consolidated"
            self.part_names = ModelBase.get_model_part_names(self.dir_model, prefix, ".safetensors")
            self.is_safetensors = len(self.part_names) > 0
            if not self.is_safetensors:
                self.part_names = ModelBase.get_model_part_names(self.dir_model, "pytorch_model", ".bin")

        self.hparams = ModelBase.load_hparams(self.dir_model, self.is_mistral_format) if hparams is None else hparams
        self.tensor_names = None
        self.metadata_override = metadata_override
        self.model_name = model_name
        self.dir_model_card = dir_model  # overridden in convert_lora_to_gguf.py

        # Apply heuristics to figure out typical tensor encoding based on first layer tensor encoding type
        if self.ftype == gguf.LlamaFileType.GUESSED:
            # NOTE: can't use field "torch_dtype" in config.json, because some finetunes lie.
            _, first_tensor = next(self.get_tensors())
            if first_tensor.dtype == torch.float16:
                logger.info(f"choosing --outtype f16 from first tensor type ({first_tensor.dtype})")
                self.ftype = gguf.LlamaFileType.MOSTLY_F16
            else:
                logger.info(f"choosing --outtype bf16 from first tensor type ({first_tensor.dtype})")
                self.ftype = gguf.LlamaFileType.MOSTLY_BF16

        # Configure GGUF Writer
        self.gguf_writer = gguf.GGUFWriter(path=None, arch=gguf.MODEL_ARCH_NAMES[self.model_arch], endianess=self.endianess, use_temp_file=self.use_temp_file,
                                           split_max_tensors=split_max_tensors, split_max_size=split_max_size, dry_run=dry_run, small_first_shard=small_first_shard)

        # Mistral specific
        self.disable_mistral_community_chat_template = disable_mistral_community_chat_template

    @classmethod
    def add_prefix_to_filename(cls, path: Path, prefix: str) -> Path:
        stem, suffix = path.stem, path.suffix
        new_name = f"{prefix}{stem}{suffix}"
        return path.with_name(new_name)

    def find_hparam(self, keys: Iterable[str], optional: bool = False) -> Any:
        key = next((k for k in keys if k in self.hparams), None)
        if key is not None:
            return self.hparams[key]
        if optional:
            return None
        raise KeyError(f"could not find any of: {keys}")
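
    # Usage sketch (illustrative comment, not part of the original file): different
    # HF configs name the same hyperparameter differently, so callers pass a list
    # of aliases and the first match wins, e.g.:
    #
    #   n_embd = self.find_hparam(["hidden_size", "n_embd", "dim"])
    #   rope_dim = self.find_hparam(["rotary_dim"], optional=True)  # -> None if absent
    #
    # ("rotary_dim" here is only an example key, not one this file necessarily uses.)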

    def get_tensors(self) -> Iterator[tuple[str, Tensor]]:
        tensor_names_from_parts: set[str] = set()

        if not self.is_mistral_format:
            index_name = "model.safetensors" if self.is_safetensors else "pytorch_model.bin"
            index_name += ".index.json"
            index_file = self.dir_model / index_name

            if index_file.is_file():
                self.tensor_names = set()
                logger.info(f"gguf: loading model weight map from '{index_name}'")
                with open(index_file, "r", encoding="utf-8") as f:
                    index: dict[str, Any] = json.load(f)
                    weight_map = index.get("weight_map")
                    if weight_map is None or not isinstance(weight_map, dict):
                        raise ValueError(f"Can't load 'weight_map' from {index_name!r}")
                    self.tensor_names.update(weight_map.keys())
            else:
                self.tensor_names = tensor_names_from_parts
                weight_map = {}
        else:
            self.tensor_names = tensor_names_from_parts
            weight_map = {}

        for part_name in self.part_names:
            logger.info(f"gguf: loading model part '{part_name}'")
            ctx: ContextManager[Any]
            if self.is_safetensors:
                from safetensors import safe_open
                ctx = cast(ContextManager[Any], safe_open(self.dir_model / part_name, framework="pt", device="cpu"))
            else:
                ctx = contextlib.nullcontext(torch.load(str(self.dir_model / part_name), map_location="cpu", mmap=True, weights_only=True))

            with ctx as model_part:
                tensor_names_from_parts.update(model_part.keys())

                for name in model_part.keys():
                    if self.is_safetensors:
                        if self.lazy:
                            data = model_part.get_slice(name)
                            data = LazyTorchTensor.from_safetensors_slice(data)
                        else:
                            data = model_part.get_tensor(name)
                    else:
                        data = model_part[name]
                        if self.lazy:
                            data = LazyTorchTensor.from_eager(data)
                    yield name, data

        # verify tensor name presence and identify potentially missing files
        if len(tensor_names_from_parts.symmetric_difference(self.tensor_names)) > 0:
            missing = sorted(self.tensor_names.difference(tensor_names_from_parts))
            extra = sorted(tensor_names_from_parts.difference(self.tensor_names))
            missing_files = sorted(set(weight_map[n] for n in missing if n in weight_map))
            if len(extra) == 0 and len(missing_files) > 0:
                raise ValueError(f"Missing or incomplete model files: {missing_files}\n"
                                 f"Missing tensors: {missing}")
            else:
                raise ValueError("Mismatch between weight map and model parts for tensor names:\n"
                                 f"Missing tensors: {missing}\n"
                                 f"Extra tensors: {extra}")

    def format_tensor_name(self, key: gguf.MODEL_TENSOR, bid: int | None = None, suffix: str = ".weight") -> str:
        if key not in gguf.MODEL_TENSORS[self.model_arch]:
            raise ValueError(f"Missing {key!r} for MODEL_TENSORS of {self.model_arch!r}")
        name: str = gguf.TENSOR_NAMES[key]
        if "{bid}" in name:
            assert bid is not None
            name = name.format(bid=bid)
        return name + suffix

    def match_model_tensor_name(self, name: str, key: gguf.MODEL_TENSOR, bid: int | None, suffix: str = ".weight") -> bool:
        if key not in gguf.MODEL_TENSORS[self.model_arch]:
            return False
        key_name: str = gguf.TENSOR_NAMES[key]
        if "{bid}" in key_name:
            if bid is None:
                return False
            key_name = key_name.format(bid=bid)
        else:
            if bid is not None:
                return False
        return name == (key_name + suffix)

    def map_tensor_name(self, name: str, try_suffixes: Sequence[str] = (".weight", ".bias")) -> str:
        new_name = self.tensor_map.get_name(key=name, try_suffixes=try_suffixes)
        if new_name is None:
            raise ValueError(f"Can not map tensor {name!r}")
        return new_name

    def set_gguf_parameters(self):
        raise NotImplementedError("set_gguf_parameters() must be implemented in subclasses")

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused
        return [(self.map_tensor_name(name), data_torch)]

    def tensor_force_quant(self, name: str, new_name: str, bid: int | None, n_dims: int) -> gguf.GGMLQuantizationType | bool:
        del name, new_name, bid, n_dims  # unused
        return False

    # some models need extra generated tensors (like rope_freqs)
    def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
        return ()

    def prepare_tensors(self):
        max_name_len = max(len(s) for _, s in self.tensor_map.mapping.values()) + len(".weight,")

        for name, data_torch in chain(self.generate_extra_tensors(), self.get_tensors()):
            # we don't need these
            if name.endswith((".attention.masked_bias", ".attention.bias", ".rotary_emb.inv_freq")):
                continue

            old_dtype = data_torch.dtype

            # convert any unsupported data types to float32
            if data_torch.dtype not in (torch.float16, torch.float32):
                data_torch = data_torch.to(torch.float32)

            # use the first number-like part of the tensor name as the block id
            bid = None
            for part in name.split("."):
                if part.isdecimal():
                    bid = int(part)
                    break

            for new_name, data_torch in (self.modify_tensors(data_torch, name, bid)):
                # TODO: why do we squeeze here?
                # data = data_torch.squeeze().numpy()
                data = data_torch.numpy()

                n_dims = len(data.shape)
                data_qtype: gguf.GGMLQuantizationType | bool = self.tensor_force_quant(name, new_name, bid, n_dims)

                # Most of the codebase that takes in 1D tensors or norms only handles F32 tensors
                if n_dims <= 1 or new_name.endswith("_norm.weight"):
                    data_qtype = gguf.GGMLQuantizationType.F32

                # Conditions should closely match those in llama_model_quantize_internal in llama.cpp
                # Some tensor types are always in float32
                if data_qtype is False and (
                    any(
                        self.match_model_tensor_name(new_name, key, bid)
                        for key in (
                            gguf.MODEL_TENSOR.FFN_GATE_INP,
                            gguf.MODEL_TENSOR.POS_EMBD,
                            gguf.MODEL_TENSOR.TOKEN_TYPES,
                            gguf.MODEL_TENSOR.SSM_CONV1D,
                            gguf.MODEL_TENSOR.SHORTCONV_CONV,
                            gguf.MODEL_TENSOR.TIME_MIX_FIRST,
                            gguf.MODEL_TENSOR.TIME_MIX_W1,
                            gguf.MODEL_TENSOR.TIME_MIX_W2,
                            gguf.MODEL_TENSOR.TIME_MIX_DECAY_W1,
                            gguf.MODEL_TENSOR.TIME_MIX_DECAY_W2,
                            gguf.MODEL_TENSOR.TIME_MIX_LERP_FUSED,
                            gguf.MODEL_TENSOR.POSNET_NORM1,
                            gguf.MODEL_TENSOR.POSNET_NORM2,
                            gguf.MODEL_TENSOR.V_ENC_EMBD_POS,
                            gguf.MODEL_TENSOR.A_ENC_EMBD_POS,
                            gguf.MODEL_TENSOR.ALTUP_CORRECT_COEF,
                            gguf.MODEL_TENSOR.ALTUP_PREDICT_COEF,
                        )
                    )
                    or not new_name.endswith(".weight")
                ):
                    data_qtype = gguf.GGMLQuantizationType.F32

                if data_qtype is False and any(
                    self.match_model_tensor_name(new_name, key, bid)
                    for key in (
                        gguf.MODEL_TENSOR.TOKEN_EMBD,
                        gguf.MODEL_TENSOR.PER_LAYER_TOKEN_EMBD,
                        gguf.MODEL_TENSOR.OUTPUT,
                        gguf.MODEL_TENSOR.ALTUP_ROUTER,
                        gguf.MODEL_TENSOR.LAUREL_L,
                        gguf.MODEL_TENSOR.LAUREL_R,
                    )
                ):
                    if self.ftype in (
                        gguf.LlamaFileType.MOSTLY_TQ1_0,
                        gguf.LlamaFileType.MOSTLY_TQ2_0,
                    ):
                        # TODO: use Q4_K and Q6_K
                        data_qtype = gguf.GGMLQuantizationType.F16

                # No override (data_qtype is False), or wants to be quantized (data_qtype is True)
                if isinstance(data_qtype, bool):
                    if self.ftype == gguf.LlamaFileType.ALL_F32:
                        data_qtype = gguf.GGMLQuantizationType.F32
                    elif self.ftype == gguf.LlamaFileType.MOSTLY_F16:
                        data_qtype = gguf.GGMLQuantizationType.F16
                    elif self.ftype == gguf.LlamaFileType.MOSTLY_BF16:
                        data_qtype = gguf.GGMLQuantizationType.BF16
                    elif self.ftype == gguf.LlamaFileType.MOSTLY_Q8_0:
                        data_qtype = gguf.GGMLQuantizationType.Q8_0
                    elif self.ftype == gguf.LlamaFileType.MOSTLY_TQ1_0:
                        data_qtype = gguf.GGMLQuantizationType.TQ1_0
                    elif self.ftype == gguf.LlamaFileType.MOSTLY_TQ2_0:
                        data_qtype = gguf.GGMLQuantizationType.TQ2_0
                    else:
                        raise ValueError(f"Unknown file type: {self.ftype.name}")

                try:
                    data = gguf.quants.quantize(data, data_qtype)
                except gguf.QuantError as e:
                    logger.warning("%s, %s", e, "falling back to F16")
                    data_qtype = gguf.GGMLQuantizationType.F16
                    data = gguf.quants.quantize(data, data_qtype)

                shape = gguf.quant_shape_from_byte_shape(data.shape, data_qtype) if data.dtype == np.uint8 else data.shape

                # reverse shape to make it similar to the internal ggml dimension order
                shape_str = f"{{{', '.join(str(n) for n in reversed(shape))}}}"

                # n_dims is implicit in the shape
                logger.info(f"{f'%-{max_name_len}s' % f'{new_name},'} {old_dtype} --> {data_qtype.name}, shape = {shape_str}")

                self.gguf_writer.add_tensor(new_name, data, raw_dtype=data_qtype)
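
    # Worked example (illustrative comment, not part of the original file): with
    # --outtype q8_0, a 2D tensor such as "blk.0.attn_q.weight" passes every F32
    # override above and is quantized to Q8_0, while a 1D "blk.0.attn_norm.weight"
    # is forced to F32 by the "n_dims <= 1 or endswith('_norm.weight')" check near
    # the top of the loop. If Q8_0 quantization fails (e.g. a row size that is not
    # a multiple of the block size), the except branch retries with F16.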

    def set_type(self):
        self.gguf_writer.add_type(gguf.GGUFType.MODEL)

    def prepare_metadata(self, vocab_only: bool):
        total_params, shared_params, expert_params, expert_count = self.gguf_writer.get_total_parameter_count()

        self.metadata = gguf.Metadata.load(self.metadata_override, self.dir_model_card, self.model_name, total_params)

        # If we are using HF model id, set the metadata name to the model id
        if self.remote_hf_model_id:
            self.metadata.name = self.remote_hf_model_id

        # Fallback to model directory name if metadata name is still missing
        if self.metadata.name is None:
            self.metadata.name = self.dir_model.name

        # Generate parameter weight class (useful for leader boards) if not yet determined
        if self.metadata.size_label is None and total_params > 0:
            self.metadata.size_label = gguf.size_label(total_params, shared_params, expert_params, expert_count)

        self.set_type()

        logger.info("Set meta model")
        self.metadata.set_gguf_meta_model(self.gguf_writer)

        logger.info("Set model parameters")
        self.set_gguf_parameters()

        logger.info("Set model quantization version")
        self.gguf_writer.add_quantization_version(gguf.GGML_QUANT_VERSION)

    def write_vocab(self):
        raise NotImplementedError("write_vocab() must be implemented in subclasses")

    def write(self):
        self.prepare_tensors()
        self.prepare_metadata(vocab_only=False)
        self.gguf_writer.write_header_to_file(path=self.fname_out)
        self.gguf_writer.write_kv_data_to_file()
        self.gguf_writer.write_tensors_to_file(progress=True)
        self.gguf_writer.close()
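
    # Usage sketch (illustrative comment; SomeRegisteredModel is a placeholder for
    # any registered subclass): write() drives the whole conversion. Metadata is
    # prepared after the tensors because the total parameter count used for the
    # size label is only known once every tensor has been added to the writer:
    #
    #   model = SomeRegisteredModel(Path("my-hf-model"), gguf.LlamaFileType.MOSTLY_F16, Path("out"))
    #   model.write()  # writes header, KV metadata, then tensor data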

    @staticmethod
    def get_model_part_names(dir_model: Path, prefix: str, suffix: str) -> list[str]:
        part_names: list[str] = []
        for filename in os.listdir(dir_model):
            if filename.startswith(prefix) and filename.endswith(suffix):
                part_names.append(filename)

        part_names.sort()

        return part_names

    @staticmethod
    def load_hparams(dir_model: Path, is_mistral_format: bool):
        if is_mistral_format:
            with open(dir_model / "params.json", "r", encoding="utf-8") as f:
                config = json.load(f)
            return config

        try:
            # for security reasons, we don't allow loading remote code by default
            # if a model needs remote code, we fall back to config.json
            config = AutoConfig.from_pretrained(dir_model, trust_remote_code=False).to_dict()
        except Exception as e:
            logger.warning(f"Failed to load model config from {dir_model}: {e}")
            logger.warning("Trying to load config.json instead")
            with open(dir_model / "config.json", "r", encoding="utf-8") as f:
                config = json.load(f)

        if "llm_config" in config:
            # rename for InternVL
            config["text_config"] = config["llm_config"]
        if "thinker_config" in config:
            # rename for Qwen2.5-Omni
            config["text_config"] = config["thinker_config"]["text_config"]
        return config

    @classmethod
    def register(cls, *names: str) -> Callable[[AnyModel], AnyModel]:
        assert names

        def func(modelcls: AnyModel) -> AnyModel:
            model_type = ModelType.MMPROJ if modelcls.model_arch == gguf.MODEL_ARCH.MMPROJ else ModelType.TEXT
            for name in names:
                cls._model_classes[model_type][name] = modelcls
            return modelcls
        return func

    @classmethod
    def print_registered_models(cls):
        for model_type, model_classes in cls._model_classes.items():
            logger.error(f"{model_type.name} models:")
            for name in sorted(model_classes.keys()):
                logger.error(f"  - {name}")

    @classmethod
    def from_model_architecture(cls, arch: str, model_type=ModelType.TEXT) -> type[ModelBase]:
        try:
            return cls._model_classes[model_type][arch]
        except KeyError:
            raise NotImplementedError(f'Architecture {arch!r} not supported!') from None
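
    # Registration/lookup sketch (illustrative comment, not part of the original
    # file): subclasses register themselves under their HF architecture name, and
    # the driver resolves the class from the loaded config, roughly like:
    #
    #   @ModelBase.register("LlamaForCausalLM")
    #   class LlamaModel(TextModel):
    #       model_arch = gguf.MODEL_ARCH.LLAMA
    #
    #   hparams = ModelBase.load_hparams(dir_model, is_mistral_format=False)
    #   model_class = ModelBase.from_model_architecture(hparams["architectures"][0])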
  412. class TextModel(ModelBase):
  413. model_type = ModelType.TEXT
  414. hf_arch: str
  415. def __init__(self, *args, **kwargs):
  416. super().__init__(*args, **kwargs)
  417. if not self.is_mistral_format:
  418. self.hf_arch = get_model_architecture(self.hparams, self.model_type)
  419. else:
  420. self.hf_arch = ""
  421. if "text_config" in self.hparams:
  422. # move the text_config to the root level
  423. self.hparams = {**self.hparams, **self.hparams["text_config"]}
  424. self.block_count = self.find_hparam(["n_layers", "num_hidden_layers", "n_layer", "num_layers"])
  425. self.tensor_map = gguf.get_tensor_name_map(self.model_arch, self.block_count)
  426. @classmethod
  427. def __init_subclass__(cls):
  428. # can't use an abstract property, because overriding it without type errors
  429. # would require using decorated functions instead of simply defining the property
  430. if "model_arch" not in cls.__dict__:
  431. raise TypeError(f"Missing property 'model_arch' for {cls.__name__!r}")
  432. def set_vocab(self):
  433. self._set_vocab_gpt2()

    def prepare_metadata(self, vocab_only: bool):
        super().prepare_metadata(vocab_only=vocab_only)

        total_params = self.gguf_writer.get_total_parameter_count()[0]

        # Extract the encoding scheme from the file type name. e.g. 'gguf.LlamaFileType.MOSTLY_Q8_0' --> 'Q8_0'
        output_type: str = self.ftype.name.partition("_")[2]

        # Filename Output
        if self.fname_out.is_dir():
            # Generate default filename based on model specification and available metadata
            if not vocab_only:
                fname_default: str = gguf.naming_convention(self.metadata.name, self.metadata.basename, self.metadata.finetune, self.metadata.version, self.metadata.size_label, output_type, model_type="LoRA" if total_params < 0 else None)
            else:
                fname_default: str = gguf.naming_convention(self.metadata.name, self.metadata.basename, self.metadata.finetune, self.metadata.version, size_label=None, output_type=None, model_type="vocab")

            # Use the default filename
            self.fname_out = self.fname_out / f"{fname_default}.gguf"
        else:
            # Output path is a custom defined templated filename
            # Note: `not is_dir()` is used because `.is_file()` will not detect
            #       file template strings as it doesn't actually exist as a file
            # Process templated file name with the output ftype, useful with the "auto" ftype
            self.fname_out = self.fname_out.parent / gguf.fill_templated_filename(self.fname_out.name, output_type)

        logger.info("Set model tokenizer")
        self.set_vocab()
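
    # With the default naming convention and a directory as the output path, the
    # generated name typically looks like "Some-Model-v1.0-7B-Q8_0.gguf"; the exact
    # pieces depend on which metadata fields could be derived (this example is
    # illustrative, not normative).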

    def set_gguf_parameters(self):
        self.gguf_writer.add_block_count(self.block_count)

        if (n_ctx := self.find_hparam(["max_position_embeddings", "n_ctx", "n_positions", "max_length"], optional=True)) is not None:
            self.gguf_writer.add_context_length(n_ctx)
            logger.info(f"gguf: context length = {n_ctx}")

        if (n_embd := self.find_hparam(["hidden_size", "n_embd", "dim"], optional=True)) is not None:
            self.gguf_writer.add_embedding_length(n_embd)
            logger.info(f"gguf: embedding length = {n_embd}")

        if (n_ff := self.find_hparam(["intermediate_size", "n_inner", "hidden_dim"], optional=True)) is not None:
            self.gguf_writer.add_feed_forward_length(n_ff)
            logger.info(f"gguf: feed forward length = {n_ff}")

        if (n_head := self.find_hparam(["num_attention_heads", "n_head", "n_heads"], optional=True)) is not None:
            self.gguf_writer.add_head_count(n_head)
            logger.info(f"gguf: head count = {n_head}")

        if (n_head_kv := self.find_hparam(["num_key_value_heads", "n_kv_heads"], optional=True)) is not None:
            self.gguf_writer.add_head_count_kv(n_head_kv)
            logger.info(f"gguf: key-value head count = {n_head_kv}")

        if (rope_theta := self.hparams.get("rope_theta")) is not None:
            self.gguf_writer.add_rope_freq_base(rope_theta)
            logger.info(f"gguf: rope theta = {rope_theta}")

        if (f_rms_eps := self.find_hparam(["rms_norm_eps", "norm_eps"], optional=True)) is not None:
            self.gguf_writer.add_layer_norm_rms_eps(f_rms_eps)
            logger.info(f"gguf: rms norm epsilon = {f_rms_eps}")

        if (f_norm_eps := self.find_hparam(["layer_norm_eps", "layer_norm_epsilon", "norm_epsilon"], optional=True)) is not None:
            self.gguf_writer.add_layer_norm_eps(f_norm_eps)
            logger.info(f"gguf: layer norm epsilon = {f_norm_eps}")

        if (n_experts := self.hparams.get("num_local_experts")) is not None:
            self.gguf_writer.add_expert_count(n_experts)
            logger.info(f"gguf: expert count = {n_experts}")

        if (n_experts_used := self.hparams.get("num_experts_per_tok")) is not None:
            self.gguf_writer.add_expert_used_count(n_experts_used)
            logger.info(f"gguf: experts used count = {n_experts_used}")

        if (head_dim := self.hparams.get("head_dim")) is not None:
            self.gguf_writer.add_key_length(head_dim)
            self.gguf_writer.add_value_length(head_dim)

        self.gguf_writer.add_file_type(self.ftype)
        logger.info(f"gguf: file type = {self.ftype}")

    def write_vocab(self):
        if len(self.gguf_writer.tensors) != 1:
            raise ValueError('Splitting the vocabulary is not supported')

        self.prepare_metadata(vocab_only=True)
        self.gguf_writer.write_header_to_file(path=self.fname_out)
        self.gguf_writer.write_kv_data_to_file()
        self.gguf_writer.close()

    def does_token_look_special(self, token: str | bytes) -> bool:
        if isinstance(token, (bytes, bytearray)):
            token_text = token.decode(encoding="utf-8")
        elif isinstance(token, memoryview):
            token_text = token.tobytes().decode(encoding="utf-8")
        else:
            token_text = token

        # Some models mark some added tokens which ought to be control tokens as not special.
        # (e.g. command-r, command-r-plus, deepseek-coder, gemma{,-2})
        seems_special = token_text in (
            "<pad>",  # deepseek-coder
            "<mask>", "<2mass>", "[@BOS@]",  # gemma{,-2}
        )

        seems_special = seems_special or (token_text.startswith("<|") and token_text.endswith("|>"))
        seems_special = seems_special or (token_text.startswith("<｜") and token_text.endswith("｜>"))  # deepseek-coder

        # TODO: should these be marked as UNUSED instead? (maybe not)
        seems_special = seems_special or (token_text.startswith("<unused") and token_text.endswith(">"))  # gemma{,-2}

        return seems_special
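
    # Illustrative outcomes of the heuristic above: "<|im_start|>" (any <|...|>
    # token), "<unused5>" (gemma) and "<pad>" (deepseek-coder) look special, while
    # a plain added word like "hello" does not.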

    # used for GPT-2 BPE and WordPiece vocabs
    def get_vocab_base(self) -> tuple[list[str], list[int], str]:
        tokens: list[str] = []
        toktypes: list[int] = []

        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(self.dir_model)
        vocab_size = self.hparams.get("vocab_size", len(tokenizer.vocab))
        assert max(tokenizer.vocab.values()) < vocab_size

        tokpre = self.get_vocab_base_pre(tokenizer)

        reverse_vocab = {id_: encoded_tok for encoded_tok, id_ in tokenizer.vocab.items()}
        added_vocab = tokenizer.get_added_vocab()
        added_tokens_decoder = tokenizer.added_tokens_decoder

        for i in range(vocab_size):
            if i not in reverse_vocab:
                tokens.append(f"[PAD{i}]")
                toktypes.append(gguf.TokenType.UNUSED)
            else:
                token: str = reverse_vocab[i]
                if token in added_vocab:
                    # The tokenizer in llama.cpp assumes the CONTROL and USER_DEFINED tokens are pre-normalized.
                    # To avoid unexpected issues - we make sure to normalize non-normalized tokens
                    if not added_tokens_decoder[i].normalized:
                        previous_token = token
                        token = tokenizer.decode(tokenizer.encode(token, add_special_tokens=False))
                        if previous_token != token:
                            logger.info(f"{repr(previous_token)} is encoded and decoded back to {repr(token)} using AutoTokenizer")

                    if added_tokens_decoder[i].special or self.does_token_look_special(token):
                        toktypes.append(gguf.TokenType.CONTROL)
                    else:
                        # NOTE: this was added for Gemma.
                        # Encoding and decoding the tokens above isn't sufficient for this case.
                        token = token.replace(b"\xe2\x96\x81".decode("utf-8"), " ")  # pre-normalize user-defined spaces
                        toktypes.append(gguf.TokenType.USER_DEFINED)
                else:
                    toktypes.append(gguf.TokenType.NORMAL)
                tokens.append(token)

        return tokens, toktypes, tokpre

    # NOTE: this function is generated by convert_hf_to_gguf_update.py
    #       do not modify it manually!
    # ref:  https://github.com/ggml-org/llama.cpp/pull/6920
    # Marker: Start get_vocab_base_pre
    def get_vocab_base_pre(self, tokenizer) -> str:
        # encoding this string and hashing the resulting tokens would (hopefully) give us a unique identifier that
        # is specific for the BPE pre-tokenizer used by the model
        # we will use this unique identifier to write a "tokenizer.ggml.pre" entry in the GGUF file which we can
        # use in llama.cpp to implement the same pre-tokenizer

        chktxt = '\n \n\n \n\n\n \t \t\t \t\n  \n   \n    \n     \n🚀 (normal) 😶\u200d🌫️ (multiple emojis concatenated) ✅ 🦙🦙 3 33 333 3333 33333 333333 3333333 33333333 3.3 3..3 3...3 កាន់តែពិសេសអាច😁 ?我想在apple工作1314151天～ ------======= нещо на Български \'\'\'\'\'\'```````""""......!!!!!!?????? I\'ve been \'told he\'s there, \'RE you sure? \'M not sure I\'ll make it, \'D you like some tea? We\'Ve a\'lL'

        chktok = tokenizer.encode(chktxt)
        chkhsh = sha256(str(chktok).encode()).hexdigest()

        logger.debug(f"chktok: {chktok}")
        logger.debug(f"chkhsh: {chkhsh}")

        res = None

        # NOTE: if you get an error here, you need to update the convert_hf_to_gguf_update.py script
        #       or pull the latest version of the model from Huggingface
        #       don't edit the hashes manually!
        if chkhsh == "b6e8e1518dc4305be2fe39c313ed643381c4da5db34a98f6a04c093f8afbe99b":
            # ref: https://huggingface.co/THUDM/glm-4-9b-chat
            res = "chatglm-bpe"
        if chkhsh == "81d72c7348a9f0ebe86f23298d37debe0a5e71149e29bd283904c02262b27516":
            # ref: https://huggingface.co/THUDM/glm-4-9b-chat
            res = "chatglm-bpe"
        if chkhsh == "a1336059768a55c99a734006ffb02203cd450fed003e9a71886c88acf24fdbc2":
            # ref: https://huggingface.co/THUDM/glm-4-9b-hf
            res = "glm4"
        if chkhsh == "9ca2dd618e8afaf09731a7cf6e2105b373ba6a1821559f258b272fe83e6eb902":
            # ref: https://huggingface.co/zai-org/GLM-4.5-Air
            res = "glm4"
        if chkhsh == "1431a23e583c97432bc230bff598d103ddb5a1f89960c8f1d1051aaa944d0b35":
            # ref: https://huggingface.co/sapienzanlp/Minerva-7B-base-v1.0
            res = "minerva-7b"
        if chkhsh == "7e57df22b1fe23a7b1e1c7f3dc4e3f96d43a4eb0836d0c6bdc3436d7b2f1c664":
            # ref: https://huggingface.co/tencent/Hunyuan-A13B-Instruct
            res = "hunyuan"
        if chkhsh == "bba3b3366b646dbdded5dbc42d59598b849371afc42f7beafa914afaa5b70aa6":
            # ref: https://huggingface.co/tencent/Hunyuan-4B-Instruct
            res = "hunyuan-dense"
        if chkhsh == "a6b57017d60e6edb4d88ecc2845188e0eb333a70357e45dcc9b53964a73bbae6":
            # ref: https://huggingface.co/tiiuae/Falcon-H1-0.5B-Base
            res = "falcon-h1"
        if chkhsh == "60476e1243776c4fb1b993dbd7a5f15ac22f83c80afdf425fa5ae01c8d44ef86":
            # ref: https://huggingface.co/tiiuae/Falcon-H1-1B-Base
            res = "falcon-h1"
        if chkhsh == "3eda48b4c4dc7de733d1a8b3e3b4a85243dbbf704da2ee9d42c6beced8897896":
            # ref: https://huggingface.co/tiiuae/Falcon-H1-7B-Base
            res = "falcon-h1"
        if chkhsh == "48f8e02c0359c0bbdd82f26909171fac1c18a457bb47573ed1fe3bbb2c1cfd4b":
            # ref: https://huggingface.co/tiiuae/Falcon-H1-34B-Base
            res = "falcon-h1"
        if chkhsh == "81212dc7cdb7e0c1074ca62c5aeab0d43c9f52b8a737be7b12a777c953027890":
            # ref: https://huggingface.co/moonshotai/Kimi-K2-Base
            res = "kimi-k2"
        if chkhsh == "d4540891389ea895b53b399da6ac824becc30f2fba0e9ddbb98f92e55ca0e97c":
            # ref: https://huggingface.co/Qwen/Qwen3-Embedding-0.6B
            res = "qwen2"
        if chkhsh == "66b8d4e19ab16c3bfd89bce5d785fb7e0155e8648708a1f42077cb9fe002c273":
            # ref: https://huggingface.co/alvarobartt/grok-2-tokenizer
            res = "grok-2"
        if chkhsh == "0ef9807a4087ebef797fc749390439009c3b9eda9ad1a097abbe738f486c01e5":
            # ref: https://huggingface.co/meta-llama/Meta-Llama-3-8B
            res = "llama-bpe"
        if chkhsh == "049ecf7629871e3041641907f3de7c733e4dbfdc736f57d882ba0b0845599754":
            # ref: https://huggingface.co/deepseek-ai/deepseek-llm-7b-base
            res = "deepseek-llm"
        if chkhsh == "347715f544604f9118bb75ed199f68779f423cabb20db6de6f31b908d04d7821":
            # ref: https://huggingface.co/deepseek-ai/deepseek-coder-6.7b-base
            res = "deepseek-coder"
        if chkhsh == "8aeee3860c56296a157a1fe2fad249ec40aa59b1bb5709f4ade11c4e6fe652ed":
            # ref: https://huggingface.co/tiiuae/falcon-7b
            res = "falcon"
        if chkhsh == "0876d13b50744004aa9aeae05e7b0647eac9d801b5ba4668afc01e709c15e19f":
            # ref: https://huggingface.co/BAAI/bge-small-en-v1.5
            res = "bert-bge"
        if chkhsh == "9d032fcbd5501f4a38150912590928bfb36091efb5df11b8e2124b0390e3fb1e":
            # ref: https://huggingface.co/tiiuae/Falcon3-7B-Base
            res = "falcon3"
        if chkhsh == "8e62295832751ca1e8f92f2226f403dea30dc5165e448b5bfa05af5340c64ec7":
            # ref: https://huggingface.co/BAAI/bge-large-zh-v1.5
            res = "bert-bge-large"
        if chkhsh == "b6dc8df998e1cfbdc4eac8243701a65afe638679230920b50d6f17d81c098166":
            # ref: https://huggingface.co/mosaicml/mpt-7b
            res = "mpt"
        if chkhsh == "35d91631860c815f952d711435f48d356ebac988362536bed955d43bfa436e34":
            # ref: https://huggingface.co/bigcode/starcoder2-3b
            res = "starcoder"
        if chkhsh == "3ce83efda5659b07b1ad37ca97ca5797ea4285d9b9ab0dc679e4a720c9da7454":
            # ref: https://huggingface.co/openai-community/gpt2
            res = "gpt-2"
        if chkhsh == "32d85c31273f8019248f2559fed492d929ea28b17e51d81d3bb36fff23ca72b3":
            # ref: https://huggingface.co/stabilityai/stablelm-2-zephyr-1_6b
            res = "stablelm2"
        if chkhsh == "6221ad2852e85ce96f791f476e0b390cf9b474c9e3d1362f53a24a06dc8220ff":
            # ref: https://huggingface.co/smallcloudai/Refact-1_6-base
            res = "refact"
        if chkhsh == "9c2227e4dd922002fb81bde4fc02b0483ca4f12911410dee2255e4987644e3f8":
            # ref: https://huggingface.co/CohereForAI/c4ai-command-r-v01
            res = "command-r"
        if chkhsh == "e636dc30a262dcc0d8c323492e32ae2b70728f4df7dfe9737d9f920a282b8aea":
            # ref: https://huggingface.co/Qwen/Qwen1.5-7B
            res = "qwen2"
        if chkhsh == "b6dc8df998e1cfbdc4eac8243701a65afe638679230920b50d6f17d81c098166":
            # ref: https://huggingface.co/allenai/OLMo-1.7-7B-hf
            res = "olmo"
        if chkhsh == "a8594e3edff7c29c003940395316294b2c623e09894deebbc65f33f1515df79e":
            # ref: https://huggingface.co/databricks/dbrx-base
            res = "dbrx"
        if chkhsh == "c7699093ba4255a91e702aa38a596aa81669f3525dae06c2953267dde580f448":
            # ref: https://huggingface.co/jinaai/jina-reranker-v1-tiny-en
            res = "jina-v1-en"
        if chkhsh == "0876d13b50744004aa9aeae05e7b0647eac9d801b5ba4668afc01e709c15e19f":
            # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-en
            res = "jina-v2-en"
        if chkhsh == "171aeeedd6fb548d418a7461d053f11b6f1f1fc9b387bd66640d28a4b9f5c643":
            # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-es
            res = "jina-v2-es"
        if chkhsh == "27949a2493fc4a9f53f5b9b029c82689cfbe5d3a1929bb25e043089e28466de6":
            # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-de
            res = "jina-v2-de"
        if chkhsh == "c136ed14d01c2745d4f60a9596ae66800e2b61fa45643e72436041855ad4089d":
            # ref: https://huggingface.co/abacusai/Smaug-Llama-3-70B-Instruct
            res = "smaug-bpe"
        if chkhsh == "c7ea5862a53e4272c035c8238367063e2b270d51faa48c0f09e9d5b54746c360":
            # ref: https://huggingface.co/LumiOpen/Poro-34B-chat
            res = "poro-chat"
        if chkhsh == "7967bfa498ade6b757b064f31e964dddbb80f8f9a4d68d4ba7998fcf281c531a":
            # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-code
            res = "jina-v2-code"
        if chkhsh == "7fc505bd3104ca1083b150b17d088b59534ede9bde81f0dd2090967d7fe52cee":
            # ref: https://huggingface.co/LumiOpen/Viking-7B
            res = "viking"
        if chkhsh == "b53802fb28e26d645c3a310b34bfe07da813026ec7c7716883404d5e0f8b1901":
            # ref: https://huggingface.co/core42/jais-13b
            res = "jais"
        if chkhsh == "7b3e7548e4308f52a76e8229e4e6cc831195d0d1df43aed21ac6c93da05fec5f":
            # ref: https://huggingface.co/WisdomShell/CodeShell-7B
            res = "codeshell"
        if chkhsh == "63b97e4253352e6f357cc59ea5b583e3a680eaeaf2632188c2b952de2588485e":
            # ref: https://huggingface.co/mistralai/Mistral-Nemo-Base-2407
            res = "tekken"
        if chkhsh == "855059429035d75a914d1eda9f10a876752e281a054a7a3d421ef0533e5b6249":
            # ref: https://huggingface.co/HuggingFaceTB/SmolLM-135M
            res = "smollm"
        if chkhsh == "3c30d3ad1d6b64202cd222813e7736c2db6e1bd6d67197090fc1211fbc612ae7":
            # ref: https://huggingface.co/bigscience/bloom
            res = "bloom"
        if chkhsh == "bc01ce58980e1db43859146dc51b1758b3b88729b217a74792e9f8d43e479d21":
            # ref: https://huggingface.co/TurkuNLP/gpt3-finnish-small
            res = "gpt3-finnish"
        if chkhsh == "4e2b24cc4770243d65a2c9ec19770a72f08cffc161adbb73fcbb6b7dd45a0aae":
            # ref: https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct
            res = "exaone"
        if chkhsh == "fcace8b9cac38ce847670c970cd5892031a753a1ef381abd1d9af00f713da085":
            # ref: https://huggingface.co/microsoft/phi-2
            res = "phi-2"
        if chkhsh == "60824e3c0d9401f89943cbb2fff727f0e2d4c545ba4df2d6e4f09a6db0f5b450":
            # ref: https://huggingface.co/facebook/chameleon-7b
            res = "chameleon"
        if chkhsh == "8b5a93ed704057481f240da0be7e7dca721d7f8f4755263b6807227a2cbeae65":
            # ref: https://huggingface.co/sentence-transformers/stsb-roberta-base
            res = "roberta-bpe"
        if chkhsh == "ad851be1dba641f2e3711822f816db2c265f788b37c63b4e1aeacb9ee92de8eb":
            # ref: https://huggingface.co/ai-sage/GigaChat-20B-A3B-instruct
            res = "gigachat"
        if chkhsh == "d4c8f286ea6b520b3d495c4455483cfa2302c0cfcd4be05d781b6a8a0a7cdaf1":
            # ref: https://huggingface.co/Infinigence/Megrez-3B-Instruct
            res = "megrez"
        if chkhsh == "877081d19cf6996e2c4ff0e1236341e9b7bde288f5311a56a937f0afbbb3aeb5":
            # ref: https://huggingface.co/deepseek-ai/DeepSeek-V3
            res = "deepseek-v3"
        if chkhsh == "b3f499bb4255f8ca19fccd664443283318f2fd2414d5e0b040fbdd0cc195d6c5":
            # ref: https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B
            res = "deepseek-r1-qwen"
        if chkhsh == "ccc2ef013c104be7bae2965776d611e1d7a8a2a9c547dd93a682c9a9fc80352e":
            # ref: https://huggingface.co/Xenova/gpt-4o
            res = "gpt-4o"
        if chkhsh == "7dec86086fcc38b66b7bc1575a160ae21cf705be7718b9d5598190d7c12db76f":
            # ref: https://huggingface.co/UW/OLMo2-8B-SuperBPE-t180k
            res = "superbpe"
        if chkhsh == "1994ffd01900cfb37395608534236ecd63f2bd5995d6cb1004dda1af50240f15":
            # ref: https://huggingface.co/trillionlabs/Trillion-7B-preview
            res = "trillion"
        if chkhsh == "96a5f08be6259352137b512d4157e333e21df7edd3fcd152990608735a65b224":
            # ref: https://huggingface.co/inclusionAI/Ling-lite
            res = "bailingmoe"
        if chkhsh == "d353350c764d8c3b39c763113960e4fb4919bea5fbf208a0e3b22e8469dc7406":
            # ref: https://huggingface.co/meta-llama/Llama-4-Scout-17B-16E-Instruct
            res = "llama4"
        if chkhsh == "0e9433cbbb161f89e264eb32e8e64bfe69e834973ffca5d41d3948a604a3e2a3":
            # ref: https://huggingface.co/mistral-community/pixtral-12b
            res = "pixtral"
        if chkhsh == "d5f1dd6f980fec569fb218a81a7658ac45fc56b38c5a0adeb1c232fbe04ef5ec":
            # ref: https://huggingface.co/ByteDance-Seed/Seed-Coder-8B-Base
            res = "seed-coder"
        if chkhsh == "b0a6b1c0bd5998ebd9df08611efde34a4ff03faed45ae09c43e6b31ebd4b94cf":
            # ref: https://huggingface.co/skt/A.X-4.0
            res = "a.x-4.0"
        if chkhsh == "f6791d196f87ce6b56a7d234be618e0d58f8cda3549416635b2bebcd22cd95c4":
            # ref: https://huggingface.co/K-intelligence/Midm-2.0-Base-Instruct
            res = "midm-2.0"
        if chkhsh == "169bf0296a13c4d9b7672313f749eb36501d931022de052aad6e36f2bf34dd51":
            # ref: https://huggingface.co/LiquidAI/LFM2-Tokenizer
            res = "lfm2"
        if chkhsh == "2085e1638f6c377a0aa4ead21b27bb4cb941bf800df86ed391011769c1758dfb":
            # ref: https://huggingface.co/LGAI-EXAONE/EXAONE-4.0-32B
            res = "exaone4"
        if chkhsh == "a1e163ecab2e718a4c829d1148b6e86824ec36163bb71941c3dca9cd5ac25756":
            # ref: https://huggingface.co/JetBrains/Mellum-4b-base
            res = "mellum"
        if chkhsh == "9b1be57e70d20d9501b2b3186e792d81181ae36ada3903c26f9fea418cf87206":
            # ref: https://huggingface.co/inclusionAI/LLaDA-MoE-7B-A1B-Base
            res = "llada-moe"

        if res is None:
            logger.warning("\n")
            logger.warning("**************************************************************************************")
            logger.warning("** WARNING: The BPE pre-tokenizer was not recognized!")
            logger.warning("**          There are 2 possible reasons for this:")
            logger.warning("**          - the model has not been added to convert_hf_to_gguf_update.py yet")
            logger.warning("**          - the pre-tokenization config has changed upstream")
            logger.warning("**          Check your model files and convert_hf_to_gguf_update.py and update them accordingly.")
            logger.warning("** ref:     https://github.com/ggml-org/llama.cpp/pull/6920")
            logger.warning("**")
            logger.warning(f"** chkhsh:  {chkhsh}")
            logger.warning("**************************************************************************************")
            logger.warning("\n")
            raise NotImplementedError("BPE pre-tokenizer was not recognized - update get_vocab_base_pre()")

        logger.debug(f"tokenizer.ggml.pre: {repr(res)}")
        logger.debug(f"chkhsh: {chkhsh}")

        return res
    # Marker: End get_vocab_base_pre
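
    # To add support for a new pre-tokenizer, register the model in
    # convert_hf_to_gguf_update.py and re-run that script; it downloads the
    # tokenizer and regenerates the block of hash checks above. (Invocation
    # details are documented in that script, not here.)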

    def _set_vocab_none(self) -> None:
        self.gguf_writer.add_tokenizer_model("none")

    def _set_vocab_gpt2(self) -> None:
        tokens, toktypes, tokpre = self.get_vocab_base()
        self.gguf_writer.add_tokenizer_model("gpt2")
        self.gguf_writer.add_tokenizer_pre(tokpre)
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=True)
        special_vocab.add_to_gguf(self.gguf_writer)

    def _set_vocab_qwen(self):
        dir_model = self.dir_model
        hparams = self.hparams
        tokens: list[str] = []
        toktypes: list[int] = []

        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(dir_model, trust_remote_code=True)
        vocab_size = hparams["vocab_size"]
        assert max(tokenizer.get_vocab().values()) < vocab_size

        tokpre = self.get_vocab_base_pre(tokenizer)

        merges = []
        vocab = {}
        mergeable_ranks = tokenizer.mergeable_ranks
        for token, rank in mergeable_ranks.items():
            vocab[QwenModel.token_bytes_to_string(token)] = rank
            if len(token) == 1:
                continue
            merged = QwenModel.bpe(mergeable_ranks, token, max_rank=rank)
            assert len(merged) == 2
            merges.append(' '.join(map(QwenModel.token_bytes_to_string, merged)))

        # for this kind of tokenizer, added_vocab is not a subset of vocab, so they need to be combined
        added_vocab = tokenizer.special_tokens
        reverse_vocab = {id_ : encoded_tok for encoded_tok, id_ in {**vocab, **added_vocab}.items()}

        for i in range(vocab_size):
            if i not in reverse_vocab:
                tokens.append(f"[PAD{i}]")
                toktypes.append(gguf.TokenType.UNUSED)
            elif reverse_vocab[i] in added_vocab:
                tokens.append(reverse_vocab[i])
                toktypes.append(gguf.TokenType.CONTROL)
            else:
                tokens.append(reverse_vocab[i])
                toktypes.append(gguf.TokenType.NORMAL)

        self.gguf_writer.add_tokenizer_model("gpt2")
        self.gguf_writer.add_tokenizer_pre(tokpre)
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(dir_model, load_merges=False)
        special_vocab.merges = merges
        # only add special tokens when they were not already loaded from config.json
        if len(special_vocab.special_token_ids) == 0:
            special_vocab._set_special_token("bos", tokenizer.special_tokens["<|endoftext|>"])
            special_vocab._set_special_token("eos", tokenizer.special_tokens["<|endoftext|>"])
        # this one is usually not in config.json anyway
        special_vocab._set_special_token("unk", tokenizer.special_tokens["<|endoftext|>"])
        special_vocab.add_to_gguf(self.gguf_writer)
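
    # The merge reconstruction above relies on a property of tiktoken-style rank
    # tables: for a multi-byte token of rank r, re-running BPE over its bytes while
    # only allowing merges of rank < r (QwenModel.bpe with max_rank=rank) leaves
    # exactly the two sub-tokens whose merge produced it, e.g. b"ab" decomposes
    # into b"a" and b"b", recorded as the merge rule "a b".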

    def _set_vocab_sentencepiece(self, add_to_gguf=True):
        tokens, scores, toktypes = self._create_vocab_sentencepiece()

        self.gguf_writer.add_tokenizer_model("llama")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

    def _create_vocab_sentencepiece(self):
        from sentencepiece import SentencePieceProcessor

        tokenizer_path = self.dir_model / 'tokenizer.model'

        if not tokenizer_path.is_file():
            raise FileNotFoundError(f"File not found: {tokenizer_path}")

        tokenizer = SentencePieceProcessor()
        tokenizer.LoadFromFile(str(tokenizer_path))

        vocab_size = self.find_hparam([
            "vocab_size_per_layer_input",  # gemma3n
            "vocab_size",
        ], optional=True) or tokenizer.vocab_size()

        tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
        scores: list[float] = [-10000.0] * vocab_size
        toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size

        for token_id in range(tokenizer.vocab_size()):
            if token_id >= vocab_size:
                logger.warning(f'ignore tokens from {token_id}: id is out of range, max={vocab_size - 1}')
                break

            piece = tokenizer.IdToPiece(token_id)
            text = piece.encode("utf-8")
            score = tokenizer.GetScore(token_id)

            toktype = SentencePieceTokenTypes.NORMAL
            if tokenizer.IsUnknown(token_id):
                toktype = SentencePieceTokenTypes.UNKNOWN
            elif tokenizer.IsControl(token_id):
                toktype = SentencePieceTokenTypes.CONTROL
            elif tokenizer.IsUnused(token_id):
                toktype = SentencePieceTokenTypes.UNUSED
            elif tokenizer.IsByte(token_id):
                toktype = SentencePieceTokenTypes.BYTE

            tokens[token_id] = text
            scores[token_id] = score
            toktypes[token_id] = toktype

        added_tokens_file = self.dir_model / 'added_tokens.json'
        if added_tokens_file.is_file():
            with open(added_tokens_file, "r", encoding="utf-8") as f:
                added_tokens_json = json.load(f)
                for key in added_tokens_json:
                    token_id = added_tokens_json[key]
                    if token_id >= vocab_size:
                        logger.warning(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
                        continue

                    tokens[token_id] = key.encode("utf-8")
                    scores[token_id] = -1000.0
                    toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED

        tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
        if tokenizer_config_file.is_file():
            with open(tokenizer_config_file, "r", encoding="utf-8") as f:
                tokenizer_config_json = json.load(f)
                added_tokens_decoder = tokenizer_config_json.get("added_tokens_decoder", {})
                for token_id, token_data in added_tokens_decoder.items():
                    token_id = int(token_id)
                    token: str = token_data["content"]
                    if token_id >= vocab_size:
                        logger.warning(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
                        continue

                    if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
                        if tokens[token_id] != token.encode("utf-8"):
                            logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token!r}')

                    if token_data.get("special") or self.does_token_look_special(token):
                        toktypes[token_id] = SentencePieceTokenTypes.CONTROL
                    else:
                        token = token.replace(b"\xe2\x96\x81".decode("utf-8"), " ")  # pre-normalize user-defined spaces
                        toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED

                    scores[token_id] = -1000.0
                    tokens[token_id] = token.encode("utf-8")

        if vocab_size > len(tokens):
            pad_count = vocab_size - len(tokens)
            logger.debug(f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]")
            for i in range(1, pad_count + 1):
                tokens.append(bytes(f"[PAD{i}]", encoding="utf-8"))
                scores.append(-1000.0)
                toktypes.append(SentencePieceTokenTypes.UNUSED)

        return tokens, scores, toktypes
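
    # For reference, an "added_tokens_decoder" entry in tokenizer_config.json has
    # roughly this shape (illustrative values):
    #   "32000": {"content": "<|im_start|>", "special": true, "normalized": false}
    # which the loop above records as a CONTROL token at id 32000.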

    def _set_vocab_llama_hf(self):
        vocab = gguf.LlamaHfVocab(self.dir_model)
        tokens = []
        scores = []
        toktypes = []

        for text, score, toktype in vocab.all_tokens():
            tokens.append(text)
            scores.append(score)
            toktypes.append(toktype)

        assert len(tokens) == vocab.vocab_size

        self.gguf_writer.add_tokenizer_model("llama")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

    def _set_vocab_rwkv_world(self):
        assert (self.dir_model / "rwkv_vocab_v20230424.txt").is_file()
        vocab_size = self.hparams.get("vocab_size", 65536)

        tokens: list[bytes] = ['<s>'.encode("utf-8")]
        toktypes: list[int] = [gguf.TokenType.CONTROL]

        with open(self.dir_model / "rwkv_vocab_v20230424.txt", "r", encoding="utf-8") as f:
            lines = f.readlines()
            for line in lines:
                parts = line.split(' ')
                assert len(parts) >= 3
                token, token_len = ast.literal_eval(' '.join(parts[1:-1])), int(parts[-1])
                token = token.encode("utf-8") if isinstance(token, str) else token
                assert isinstance(token, bytes)
                assert len(token) == token_len
                token_text: str = repr(token)[2:-1]  # "b'\xff'" -> "\xff"
                tokens.append(token_text.encode("utf-8"))
                toktypes.append(gguf.TokenType.NORMAL)

        remainder = vocab_size - len(tokens)
        assert remainder >= 0
        for i in range(len(tokens), vocab_size):
            tokens.append(f"[PAD{i}]".encode("utf-8"))
            toktypes.append(gguf.TokenType.UNUSED)

        self.gguf_writer.add_tokenizer_model("rwkv")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)
        special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=False)
        if special_vocab.chat_template is None:
            template_path = Path(__file__).parent / "models" / "templates" / "llama-cpp-rwkv-world.jinja"
            if template_path.is_file():
                with open(template_path, "r", encoding="utf-8") as f:
                    template = f.read()
            else:
                template = "rwkv-world"
            special_vocab.chat_template = template
        # hack: Add '\n\n' as the EOT token to make it chat normally
        special_vocab._set_special_token("eot", 261)
        # hack: Override these as they have already been set (incorrectly)
        special_vocab.special_token_ids["bos"] = 0
        special_vocab.special_token_ids["eos"] = 0

        special_vocab.add_to_gguf(self.gguf_writer)

    def _set_vocab_builtin(self, model_name: Literal["gpt-neox", "llama-spm"], vocab_size: int):
        tokenizer_path = Path(sys.path[0]) / "models" / f"ggml-vocab-{model_name}.gguf"
        logger.warning(f"Using tokenizer from '{os.path.relpath(tokenizer_path, os.getcwd())}'")
        vocab_reader = gguf.GGUFReader(tokenizer_path, "r")

        default_pre = "mpt" if model_name == "gpt-neox" else "default"

        field = vocab_reader.get_field(gguf.Keys.Tokenizer.MODEL)
        assert field  # tokenizer model
        self.gguf_writer.add_tokenizer_model(bytes(field.parts[-1]).decode("utf-8"))

        field = vocab_reader.get_field(gguf.Keys.Tokenizer.PRE)
        self.gguf_writer.add_tokenizer_pre(bytes(field.parts[-1]).decode("utf-8") if field else default_pre)

        field = vocab_reader.get_field(gguf.Keys.Tokenizer.LIST)
        assert field  # token list
        self.gguf_writer.add_token_list([bytes(field.parts[i]) for i in field.data][:vocab_size])

        if model_name == "llama-spm":
            field = vocab_reader.get_field(gguf.Keys.Tokenizer.SCORES)
            assert field  # token scores
            self.gguf_writer.add_token_scores([field.parts[i].tolist()[0] for i in field.data][:vocab_size])

        field = vocab_reader.get_field(gguf.Keys.Tokenizer.TOKEN_TYPE)
        assert field  # token types
        self.gguf_writer.add_token_types([field.parts[i].tolist()[0] for i in field.data][:vocab_size])

        if model_name != "llama-spm":
            field = vocab_reader.get_field(gguf.Keys.Tokenizer.MERGES)
            assert field  # token merges
            self.gguf_writer.add_token_merges([bytes(field.parts[i]) for i in field.data])

        if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.BOS_ID)) is not None:
            self.gguf_writer.add_bos_token_id(field.parts[-1].tolist()[0])
        if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.EOS_ID)) is not None:
            self.gguf_writer.add_eos_token_id(field.parts[-1].tolist()[0])
        if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.UNK_ID)) is not None:
            self.gguf_writer.add_unk_token_id(field.parts[-1].tolist()[0])
        if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.PAD_ID)) is not None:
            self.gguf_writer.add_pad_token_id(field.parts[-1].tolist()[0])
        if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.ADD_BOS)) is not None:
            self.gguf_writer.add_add_bos_token(field.parts[-1].tolist()[0])
        if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.ADD_EOS)) is not None:
            self.gguf_writer.add_add_eos_token(field.parts[-1].tolist()[0])

    def _try_set_pooling_type(self) -> None:
        # get pooling path
        pooling_path = None
        module_path = self.dir_model / "modules.json"
        if module_path.is_file():
            with open(module_path, encoding="utf-8") as f:
                modules = json.load(f)
            for mod in modules:
                if mod["type"] == "sentence_transformers.models.Pooling":
                    pooling_path = mod["path"]
                    break

        # get pooling type
        if pooling_path is not None:
            with open(self.dir_model / pooling_path / "config.json", encoding="utf-8") as f:
                pooling = json.load(f)
            if pooling["pooling_mode_mean_tokens"]:
                pooling_type = gguf.PoolingType.MEAN
            elif pooling["pooling_mode_cls_token"]:
                pooling_type = gguf.PoolingType.CLS
            elif pooling["pooling_mode_lasttoken"]:
                pooling_type = gguf.PoolingType.LAST
            else:
                raise NotImplementedError("Only MEAN, CLS, and LAST pooling types supported")
            self.gguf_writer.add_pooling_type(pooling_type)
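
    # A typical sentence-transformers modules.json entry that selects this path
    # looks roughly like (illustrative):
    #   {"idx": 1, "name": "1", "path": "1_Pooling",
    #    "type": "sentence_transformers.models.Pooling"}
    # with <path>/config.json holding the pooling_mode_* booleans read above.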

    def _set_vocab_interns1(self):
        tokens: list[str] = []
        toktypes: list[int] = []

        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(self.dir_model, trust_remote_code=True)
        vocab = getattr(tokenizer, 'vocab', tokenizer.get_vocab())
        vocab_size = self.hparams.get("vocab_size", len(vocab))
        assert max(vocab.values()) < vocab_size

        tokpre = self.get_vocab_base_pre(tokenizer)

        reverse_vocab = {id_: encoded_tok for encoded_tok, id_ in vocab.items()}
        added_vocab = tokenizer.get_added_vocab()
        added_tokens_decoder = tokenizer.added_tokens_decoder

        for i in range(vocab_size):
            if i not in reverse_vocab:
                tokens.append(f"[PAD{i}]")
                toktypes.append(gguf.TokenType.UNUSED)
            else:
                token: str = reverse_vocab[i]
                if token in added_vocab:
                    # The tokenizer in llama.cpp assumes the CONTROL and USER_DEFINED tokens are pre-normalized.
                    # To avoid unexpected issues - we make sure to normalize non-normalized tokens
                    if not added_tokens_decoder[i].normalized:
                        previous_token = token
                        token = tokenizer.decode(tokenizer.encode(token, add_special_tokens=False))
                        if previous_token != token:
                            logger.info(f"{repr(previous_token)} is encoded and decoded back to {repr(token)} using AutoTokenizer")
                    if added_tokens_decoder[i].special or self.does_token_look_special(token):
                        toktypes.append(gguf.TokenType.CONTROL)
                    else:
                        toktypes.append(gguf.TokenType.USER_DEFINED)
                else:
                    toktypes.append(gguf.TokenType.NORMAL)
                tokens.append(token)

        self.gguf_writer.add_tokenizer_model("gpt2")
        self.gguf_writer.add_tokenizer_pre(tokpre)
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=True)
        special_vocab._set_special_token("bos", 151643)
        special_vocab.add_to_gguf(self.gguf_writer)


class MmprojModel(ModelBase):
    model_type = ModelType.MMPROJ
    model_arch = gguf.MODEL_ARCH.MMPROJ
    preprocessor_config: dict[str, Any]
    global_config: dict[str, Any]

    n_block_keys = ["n_layers", "num_hidden_layers", "n_layer", "num_layers", "depth"]

    has_vision_encoder: bool = True  # by default
    has_audio_encoder: bool = False

    # for models having multiple encoders, we need to separate their hparams
    hparams_vision: dict[str, Any] | None = None
    hparams_audio: dict[str, Any] | None = None

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.model_arch != gguf.MODEL_ARCH.MMPROJ:
            raise TypeError("MmprojModel must be subclassed with model_arch = gguf.MODEL_ARCH.MMPROJ")

        # get n_embd of the text model
        if not self.is_mistral_format:
            if "text_config" not in self.hparams:
                self.hparams["text_config"] = {}
            if "audio_config" not in self.hparams:
                self.hparams["audio_config"] = {}
            text_config = {**self.hparams, **self.hparams["text_config"]}
            self.n_embd_text = text_config.get("hidden_size", text_config.get("n_embd", 0))
        else:
            text_config = {
                k: v for k, v in self.hparams.items() if k not in ["vision_encoder", "audio_encoder"]
            }
            self.n_embd_text = text_config.get("hidden_dim", 0)
        assert self.n_embd_text > 0, "n_embd not found in hparams"

        # move vision config to the top level, while preserving the original hparams in global_config
        import copy
        self.global_config = copy.deepcopy(self.hparams)
        self.hparams_vision = self.get_vision_config()
        self.hparams_audio = self.get_audio_config()

        if self.hparams_vision is None and self.hparams_audio is None:
            raise ValueError("vision_config / audio_config not found in hparams")
        # for compat with vision-only models
        self.hparams = self.hparams_vision or self.hparams_audio or self.hparams

        # TODO @ngxson : this is a hack to support both vision and audio encoders
        have_multiple_encoders = self.has_audio_encoder and self.has_vision_encoder
        self.block_count = 128 if have_multiple_encoders else self.find_hparam(self.n_block_keys, True)
        self.tensor_map = gguf.get_tensor_name_map(gguf.MODEL_ARCH.MMPROJ, self.block_count)

        # load preprocessor config
        if not self.is_mistral_format:
            with open(self.dir_model / "preprocessor_config.json", "r", encoding="utf-8") as f:
                self.preprocessor_config = json.load(f)

    def get_vision_config(self) -> dict[str, Any] | None:
        config_name = "vision_config" if not self.is_mistral_format else "vision_encoder"
        return self.global_config.get(config_name)

    def get_audio_config(self) -> dict[str, Any] | None:
        return self.global_config.get("audio_config")

    def set_type(self):
        self.gguf_writer.add_type(gguf.GGUFType.MMPROJ)

    def set_gguf_parameters(self):
        self.gguf_writer.add_file_type(self.ftype)

        if self.has_vision_encoder:
            self.gguf_writer.add_clip_has_vision_encoder(True)
            self.gguf_writer.add_vision_projection_dim(self.n_embd_text)

            # vision config
            self.gguf_writer.add_vision_image_size(self.find_vparam(["image_size"]))
            self.gguf_writer.add_vision_patch_size(self.find_vparam(["patch_size"]))
            self.gguf_writer.add_vision_embedding_length(self.find_vparam(["hidden_size"]))
            self.gguf_writer.add_vision_feed_forward_length(self.find_vparam(["intermediate_size"]))
            self.gguf_writer.add_vision_block_count(self.find_vparam(self.n_block_keys))
            self.gguf_writer.add_vision_head_count(self.find_vparam(["num_attention_heads"]))

            # preprocessor config
            image_mean = DATASET_MEAN if self.is_mistral_format else self.preprocessor_config["image_mean"]
            image_std = DATASET_STD if self.is_mistral_format else self.preprocessor_config["image_std"]
            self.gguf_writer.add_vision_image_mean(image_mean)
            self.gguf_writer.add_vision_image_std(image_std)

        if self.has_audio_encoder:
            self.gguf_writer.add_clip_has_audio_encoder(True)
            self.gguf_writer.add_audio_projection_dim(self.n_embd_text)

            # audio config
            self.gguf_writer.add_audio_embedding_length(self.find_aparam(["hidden_size"]))
            self.gguf_writer.add_audio_feed_forward_length(self.find_aparam(["intermediate_size"]))
            self.gguf_writer.add_audio_block_count(self.find_aparam(self.n_block_keys))
            self.gguf_writer.add_audio_head_count(self.find_aparam(["num_attention_heads"]))

        if not self.has_vision_encoder and not self.has_audio_encoder:
            raise ValueError("MmprojModel must have either vision or audio encoder")

    def write_vocab(self):
        raise ValueError("MmprojModel does not support vocab writing")

    def find_vparam(self, keys: Iterable[str], optional: bool = False) -> Any:
        assert self.hparams_vision is not None
        return self._find_param(self.hparams_vision, keys, optional)

    def find_aparam(self, keys: Iterable[str], optional: bool = False) -> Any:
        assert self.hparams_audio is not None
        return self._find_param(self.hparams_audio, keys, optional)

    def _find_param(self, obj: dict[str, Any], keys: Iterable[str], optional: bool = False) -> Any:
        key = next((k for k in keys if k in obj), None)
        if key is not None:
            return obj[key]
        if optional:
            return None
        raise KeyError(f"could not find any of: {keys}")

    def tensor_force_quant(self, name, new_name, bid, n_dims):
        del bid, name, n_dims  # unused
        if ".patch_embd.weight" in new_name:
            return gguf.GGMLQuantizationType.F16 if self.ftype == gguf.LlamaFileType.MOSTLY_F16 else gguf.GGMLQuantizationType.F32
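        # i.e. the patch embedding always stays in F32 (or F16 for an F16 output
        # file) and is never quantized further, whatever output type was requested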
        return False
  1182. @ModelBase.register("GPTNeoXForCausalLM")
  1183. class GPTNeoXModel(TextModel):
  1184. model_arch = gguf.MODEL_ARCH.GPTNEOX
  1185. def set_gguf_parameters(self):
  1186. block_count = self.hparams["num_hidden_layers"]
  1187. self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"])
  1188. self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
  1189. self.gguf_writer.add_block_count(block_count)
  1190. self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
  1191. self.gguf_writer.add_rope_dimension_count(
  1192. int(self.hparams["rotary_pct"] * (self.hparams["hidden_size"] // self.hparams["num_attention_heads"])),
  1193. )
  1194. self.gguf_writer.add_head_count(self.hparams["num_attention_heads"])
  1195. self.gguf_writer.add_parallel_residual(self.hparams.get("use_parallel_residual", True))
  1196. self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_eps"])
  1197. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  1198. del bid # unused
  1199. n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads"))
  1200. n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed"))
  1201. tensors: list[tuple[str, Tensor]] = []
  1202. if re.match(r"gpt_neox\.layers\.\d+\.attention\.query_key_value\.weight", name):
  1203. # Map bloom-style qkv_linear to gpt-style qkv_linear
  1204. # bloom: https://github.com/huggingface/transformers/blob/main/src/transformers/models/bloom/modeling_bloom.py#L238-L252 # noqa
  1205. # gpt-2: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt2/modeling_gpt2.py#L312 # noqa
  1206. qkv_weights = data_torch.reshape((n_head, 3, n_embed // n_head, n_embed))
  1207. data_torch = torch.cat(
  1208. (
  1209. qkv_weights[:, 0, :, :].reshape((-1, n_embed)),
  1210. qkv_weights[:, 1, :, :].reshape((-1, n_embed)),
  1211. qkv_weights[:, 2, :, :].reshape((-1, n_embed)),
  1212. ),
  1213. dim=0,
  1214. )
  1215. logger.info("re-format attention.linear_qkv.weight")
  1216. elif re.match(r"gpt_neox\.layers\.\d+\.attention\.query_key_value\.bias", name):
  1217. qkv_bias = data_torch.reshape((n_head, 3, n_embed // n_head))
  1218. data_torch = torch.cat(
  1219. (
  1220. qkv_bias[:, 0, :].reshape((n_embed,)),
  1221. qkv_bias[:, 1, :].reshape((n_embed,)),
  1222. qkv_bias[:, 2, :].reshape((n_embed,)),
  1223. ),
  1224. dim=0,
  1225. )
  1226. logger.info("re-format attention.linear_qkv.bias")
  1227. tensors.append((self.map_tensor_name(name), data_torch))
  1228. return tensors
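
# Both GPTNeoXModel above and BloomModel below store Q, K and V interleaved per
# head in a single query_key_value tensor; the modify_tensors overrides
# de-interleave it into the gpt-2-style layout. Toy example with n_head = 2,
# head_dim = 2 (rows of the weight matrix):
#   HF rows:  [q0 q0 k0 k0 v0 v0 | q1 q1 k1 k1 v1 v1]   (per-head q,k,v blocks)
#   out rows: [q0 q0 q1 q1 | k0 k0 k1 k1 | v0 v0 v1 v1] (all q, then k, then v)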
  1229. @ModelBase.register("BloomForCausalLM", "BloomModel")
  1230. class BloomModel(TextModel):
  1231. model_arch = gguf.MODEL_ARCH.BLOOM
  1232. def set_gguf_parameters(self):
  1233. n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed"))
  1234. n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads"))
  1235. self.gguf_writer.add_context_length(self.hparams.get("seq_length", n_embed))
  1236. self.gguf_writer.add_embedding_length(n_embed)
  1237. self.gguf_writer.add_feed_forward_length(4 * n_embed)
  1238. self.gguf_writer.add_block_count(self.hparams["n_layer"])
  1239. self.gguf_writer.add_head_count(n_head)
  1240. self.gguf_writer.add_head_count_kv(n_head)
  1241. self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
  1242. self.gguf_writer.add_file_type(self.ftype)
  1243. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  1244. del bid # unused
  1245. n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads"))
  1246. n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed"))
  1247. name = re.sub(r'transformer\.', '', name)
  1248. tensors: list[tuple[str, Tensor]] = []
  1249. if re.match(r"h\.\d+\.self_attention\.query_key_value\.weight", name):
  1250. # Map bloom-style qkv_linear to gpt-style qkv_linear
  1251. # bloom: https://github.com/huggingface/transformers/blob/main/src/transformers/models/bloom/modeling_bloom.py#L238-L252 # noqa
  1252. # gpt-2: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt2/modeling_gpt2.py#L312 # noqa
  1253. qkv_weights = data_torch.reshape((n_head, 3, n_embed // n_head, n_embed))
  1254. data_torch = torch.cat(
  1255. (
  1256. qkv_weights[:, 0, :, :].reshape((-1, n_embed)),
  1257. qkv_weights[:, 1, :, :].reshape((-1, n_embed)),
  1258. qkv_weights[:, 2, :, :].reshape((-1, n_embed)),
  1259. ),
  1260. dim=0,
  1261. )
  1262. logger.info("re-format attention.linear_qkv.weight")
  1263. elif re.match(r"h\.\d+\.self_attention\.query_key_value\.bias", name):
  1264. qkv_bias = data_torch.reshape((n_head, 3, n_embed // n_head))
  1265. data_torch = torch.cat(
  1266. (
  1267. qkv_bias[:, 0, :].reshape((n_embed,)),
  1268. qkv_bias[:, 1, :].reshape((n_embed,)),
  1269. qkv_bias[:, 2, :].reshape((n_embed,)),
  1270. ),
  1271. dim=0,
  1272. )
  1273. logger.info("re-format attention.linear_qkv.bias")
  1274. tensors.append((self.map_tensor_name(name), data_torch))
  1275. return tensors
  1276. @ModelBase.register("MPTForCausalLM")
  1277. class MPTModel(TextModel):
  1278. model_arch = gguf.MODEL_ARCH.MPT
  1279. def set_vocab(self):
  1280. try:
  1281. self._set_vocab_gpt2()
  1282. except Exception:
  1283. # Fallback for SEA-LION model
  1284. self._set_vocab_sentencepiece()
  1285. self.gguf_writer.add_add_bos_token(False)
  1286. self.gguf_writer.add_pad_token_id(3)
  1287. self.gguf_writer.add_eos_token_id(1)
  1288. self.gguf_writer.add_unk_token_id(0)
  1289. def set_gguf_parameters(self):
  1290. block_count = self.hparams["n_layers"]
  1291. self.gguf_writer.add_context_length(self.hparams["max_seq_len"])
  1292. self.gguf_writer.add_embedding_length(self.hparams["d_model"])
  1293. self.gguf_writer.add_block_count(block_count)
  1294. self.gguf_writer.add_feed_forward_length(4 * self.hparams["d_model"])
  1295. self.gguf_writer.add_head_count(self.hparams["n_heads"])
  1296. if kv_n_heads := self.hparams["attn_config"].get("kv_n_heads"):
  1297. self.gguf_writer.add_head_count_kv(kv_n_heads)
  1298. self.gguf_writer.add_layer_norm_eps(1e-5)
  1299. if self.hparams["attn_config"]["clip_qkv"] is not None:
  1300. self.gguf_writer.add_clamp_kqv(self.hparams["attn_config"]["clip_qkv"])
  1301. if self.hparams["attn_config"]["alibi"]:
  1302. self.gguf_writer.add_max_alibi_bias(self.hparams["attn_config"]["alibi_bias_max"])
  1303. else:
  1304. self.gguf_writer.add_max_alibi_bias(0.0)
  1305. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  1306. del bid # unused
  1307. if "scales" in name:
  1308. new_name = self.map_tensor_name(name, try_suffixes=(".weight", ".bias", ".scales"))
  1309. new_name = new_name.replace("scales", "act.scales")
  1310. else:
  1311. new_name = self.map_tensor_name(name, try_suffixes=(".weight", ".bias"))
  1312. return [(new_name, data_torch)]
  1313. @ModelBase.register("OrionForCausalLM")
  1314. class OrionModel(TextModel):
  1315. model_arch = gguf.MODEL_ARCH.ORION
  1316. def set_vocab(self):
  1317. self._set_vocab_sentencepiece()
  1318. def set_gguf_parameters(self):
  1319. block_count = self.hparams["num_hidden_layers"]
  1320. head_count = self.hparams["num_attention_heads"]
  1321. head_count_kv = self.hparams.get("num_key_value_heads", head_count)
  1322. ctx_length = 0
  1323. if "max_sequence_length" in self.hparams:
  1324. ctx_length = self.hparams["max_sequence_length"]
  1325. elif "max_position_embeddings" in self.hparams:
  1326. ctx_length = self.hparams["max_position_embeddings"]
  1327. elif "model_max_length" in self.hparams:
  1328. ctx_length = self.hparams["model_max_length"]
  1329. else:
  1330. raise ValueError("gguf: can not find ctx length parameter.")
  1331. self.gguf_writer.add_file_type(self.ftype)
  1332. self.gguf_writer.add_tensor_data_layout("Meta AI original pth")
  1333. self.gguf_writer.add_context_length(ctx_length)
  1334. self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
  1335. self.gguf_writer.add_block_count(block_count)
  1336. self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
  1337. self.gguf_writer.add_head_count(head_count)
  1338. self.gguf_writer.add_head_count_kv(head_count_kv)
  1339. # note: config provides rms norm but it is actually layer norm
  1340. # ref: https://huggingface.co/OrionStarAI/Orion-14B-Chat/blob/276a17221ce42beb45f66fac657a41540e71f4f5/modeling_orion.py#L570-L571
  1341. self.gguf_writer.add_layer_norm_eps(self.hparams["rms_norm_eps"])
  1342. @ModelBase.register("BaichuanForCausalLM", "BaiChuanForCausalLM")
  1343. class BaichuanModel(TextModel):
  1344. model_arch = gguf.MODEL_ARCH.BAICHUAN
  1345. def set_vocab(self):
  1346. self._set_vocab_sentencepiece()
  1347. def set_gguf_parameters(self):
  1348. block_count = self.hparams["num_hidden_layers"]
  1349. head_count = self.hparams["num_attention_heads"]
  1350. head_count_kv = self.hparams.get("num_key_value_heads", head_count)
  1351. ctx_length = 0
  1352. if "max_sequence_length" in self.hparams:
  1353. ctx_length = self.hparams["max_sequence_length"]
  1354. elif "max_position_embeddings" in self.hparams:
  1355. ctx_length = self.hparams["max_position_embeddings"]
  1356. elif "model_max_length" in self.hparams:
  1357. ctx_length = self.hparams["model_max_length"]
  1358. else:
  1359. raise ValueError("gguf: can not find ctx length parameter.")
  1360. self.gguf_writer.add_tensor_data_layout("Meta AI original pth")
  1361. self.gguf_writer.add_context_length(ctx_length)
  1362. self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
  1363. self.gguf_writer.add_block_count(block_count)
  1364. self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
  1365. self.gguf_writer.add_rope_dimension_count(self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
  1366. self.gguf_writer.add_head_count(head_count)
  1367. self.gguf_writer.add_head_count_kv(head_count_kv)
  1368. self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
  1369. self.gguf_writer.add_file_type(self.ftype)
  1370. rope_scaling = self.hparams.get("rope_scaling") or {}
  1371. if rope_scaling.get("rope_type", rope_scaling.get("type")) == "linear" and "factor" in rope_scaling:
  1372. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
  1373. self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
  1374. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  1375. head_count = self.hparams["num_attention_heads"]
  1376. head_count_kv = self.hparams.get("num_key_value_heads", head_count)
  1377. tensors: list[tuple[str, Tensor]] = []
  1378. if bid is not None and name == f"model.layers.{bid}.self_attn.W_pack.weight":
  1379. logger.info(f"Unpacking and permuting layer {bid}")
  1380. tensors = [
  1381. (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid),
  1382. self._reverse_hf_permute_part(data_torch, 0, head_count, head_count)),
  1383. (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid),
  1384. self._reverse_hf_permute_part(data_torch, 1, head_count, head_count_kv)),
  1385. (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid),
  1386. self._reverse_hf_part(data_torch, 2)),
  1387. ]
  1388. else:
  1389. tensors = [(self.map_tensor_name(name), data_torch)]
  1390. return tensors
  1391. def _reverse_hf_permute(self, weights: Tensor, n_head: int, n_kv_head: int | None = None) -> Tensor:
  1392. if n_kv_head is not None and n_head != n_kv_head:
  1393. n_head //= n_kv_head
  1394. return (
  1395. weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
  1396. .swapaxes(1, 2)
  1397. .reshape(weights.shape)
  1398. )
  1399. def _reverse_hf_permute_part(
  1400. self, weights: Tensor, n_part: int, n_head: int, n_head_kv: int | None = None,
  1401. ) -> Tensor:
  1402. r = weights.shape[0] // 3
  1403. return self._reverse_hf_permute(weights[r * n_part:r * n_part + r, ...], n_head, n_head_kv)
  1404. def _reverse_hf_part(self, weights: Tensor, n_part: int) -> Tensor:
  1405. r = weights.shape[0] // 3
  1406. return weights[r * n_part:r * n_part + r, ...]
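
    # _reverse_hf_permute undoes the rotary permutation applied by HF's llama-style
    # checkpoint export to q/k weights: HF stores each head's rotary dimensions as
    # two contiguous halves, while the original layout expects them interleaved in
    # pairs. Viewing the rows as (n_head, 2, head_dim // 2) and swapping the middle
    # axes turns [a0..a_{h-1}, b0..b_{h-1}] per head back into [a0, b0, a1, b1, ...].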
  1407. @ModelBase.register("XverseForCausalLM")
  1408. class XverseModel(TextModel):
  1409. model_arch = gguf.MODEL_ARCH.XVERSE
  1410. def set_vocab(self):
  1411. assert (self.dir_model / "tokenizer.json").is_file()
  1412. dir_model = self.dir_model
  1413. hparams = self.hparams
  1414. tokens: list[bytes] = []
  1415. toktypes: list[int] = []
  1416. from transformers import AutoTokenizer
  1417. tokenizer = AutoTokenizer.from_pretrained(dir_model)
  1418. vocab_size = hparams.get("vocab_size", len(tokenizer.vocab))
  1419. # Since we are checking the maximum index, we need to ensure it's strictly less than vocab_size,
  1420. # because vocab_size is the count of items, and indexes start at 0.
  1421. max_vocab_index = max(tokenizer.get_vocab().values())
  1422. if max_vocab_index >= vocab_size:
  1423. raise ValueError("Vocabulary size exceeds expected maximum size.")
  1424. reverse_vocab: dict[int, str] = {id_: encoded_tok for encoded_tok, id_ in tokenizer.vocab.items()}
  1425. added_vocab = tokenizer.get_added_vocab()
  1426. for token_id in range(vocab_size):
  1427. token_text = reverse_vocab[token_id].encode('utf-8')
  1428. # replace "\x00" to string with length > 0
  1429. if token_text == b"\x00":
  1430. toktype = gguf.TokenType.BYTE # special
  1431. token_text = f"<{token_text}>".encode('utf-8')
  1432. elif re.fullmatch(br"<0x[0-9A-Fa-f]{2}>", token_text):
  1433. toktype = gguf.TokenType.BYTE # special
  1434. elif reverse_vocab[token_id] in added_vocab:
  1435. if tokenizer.added_tokens_decoder[token_id].special:
  1436. toktype = gguf.TokenType.CONTROL
  1437. else:
  1438. toktype = gguf.TokenType.USER_DEFINED
  1439. else:
  1440. toktype = gguf.TokenType.NORMAL
  1441. tokens.append(token_text)
  1442. toktypes.append(toktype)
  1443. self.gguf_writer.add_tokenizer_model("llama")
  1444. self.gguf_writer.add_tokenizer_pre("default")
  1445. self.gguf_writer.add_token_list(tokens)
  1446. self.gguf_writer.add_token_types(toktypes)
  1447. special_vocab = gguf.SpecialVocab(dir_model, n_vocab=len(tokens))
  1448. special_vocab.add_to_gguf(self.gguf_writer)
  1449. def set_gguf_parameters(self):
  1450. block_count = self.hparams["num_hidden_layers"]
  1451. head_count = self.hparams["num_attention_heads"]
  1452. head_count_kv = self.hparams.get("num_key_value_heads", head_count)
  1453. ctx_length = 0
  1454. if "max_sequence_length" in self.hparams:
  1455. ctx_length = self.hparams["max_sequence_length"]
  1456. elif "max_position_embeddings" in self.hparams:
  1457. ctx_length = self.hparams["max_position_embeddings"]
  1458. elif "model_max_length" in self.hparams:
  1459. ctx_length = self.hparams["model_max_length"]
  1460. else:
  1461. raise ValueError("gguf: can not find ctx length parameter.")
  1462. self.gguf_writer.add_tensor_data_layout("Meta AI original pth")
  1463. self.gguf_writer.add_context_length(ctx_length)
  1464. self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
  1465. self.gguf_writer.add_block_count(block_count)
  1466. self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
  1467. self.gguf_writer.add_rope_dimension_count(self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
  1468. self.gguf_writer.add_head_count(head_count)
  1469. self.gguf_writer.add_head_count_kv(head_count_kv)
  1470. self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
  1471. self.gguf_writer.add_file_type(self.ftype)
  1472. rope_scaling = self.hparams.get("rope_scaling") or {}
  1473. if rope_scaling.get("rope_type", rope_scaling.get("type")) == "linear" and "factor" in rope_scaling:
  1474. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
  1475. self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
  1476. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  1477. del bid # unused
  1478. head_count = self.hparams["num_attention_heads"]
  1479. head_count_kv = self.hparams.get("num_key_value_heads", head_count)
  1480. # HF models permute some of the tensors, so we need to undo that
  1481. if name.endswith("q_proj.weight"):
  1482. data_torch = self._reverse_hf_permute(data_torch, head_count, head_count)
  1483. if name.endswith("k_proj.weight"):
  1484. data_torch = self._reverse_hf_permute(data_torch, head_count, head_count_kv)
  1485. return [(self.map_tensor_name(name), data_torch)]
  1486. def _reverse_hf_permute(self, weights: Tensor, n_head: int, n_kv_head: int | None = None) -> Tensor:
  1487. if n_kv_head is not None and n_head != n_kv_head:
  1488. n_head //= n_kv_head
  1489. return (
  1490. weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
  1491. .swapaxes(1, 2)
  1492. .reshape(weights.shape)
  1493. )


@ModelBase.register("FalconForCausalLM", "RWForCausalLM")
class FalconModel(TextModel):
    model_arch = gguf.MODEL_ARCH.FALCON

    def set_gguf_parameters(self):
        block_count = self.hparams.get("num_hidden_layers")
        if block_count is None:
            block_count = self.hparams["n_layer"]  # old name

        n_head = self.hparams.get("num_attention_heads")
        if n_head is None:
            n_head = self.hparams["n_head"]  # old name

        n_head_kv = self.hparams.get("num_kv_heads")
        if n_head_kv is None:
            n_head_kv = self.hparams.get("n_head_kv", 1)  # old name

        self.gguf_writer.add_context_length(2048)  # not in config.json
        self.gguf_writer.add_tensor_data_layout("jploski")  # qkv tensor transform
        self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
        self.gguf_writer.add_feed_forward_length(4 * self.hparams["hidden_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_head_count(n_head)
        self.gguf_writer.add_head_count_kv(n_head_kv)
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        # QKV tensor transform
        # The original query_key_value tensor contains n_head_kv "kv groups",
        # each consisting of n_head / n_head_kv query weights followed by one key
        # and one value weight (shared by all query heads in the kv group).
        # This layout makes it a big pain to work with in GGML.
        # So we rearrange them here, so that we have n_head query weights
        # followed by n_head_kv key weights followed by n_head_kv value weights,
        # in contiguous fashion.
        # ref: https://github.com/jploski/ggml/blob/falcon40b/examples/falcon/convert-hf-to-ggml.py

        if "query_key_value" in name:
            n_head = self.find_hparam(["num_attention_heads", "n_head"])
            n_head_kv = self.find_hparam(["num_kv_heads", "n_head_kv"], optional=True) or 1
            head_dim = self.hparams["hidden_size"] // n_head

            qkv = data_torch.view(n_head_kv, n_head // n_head_kv + 2, head_dim, head_dim * n_head)
            q = qkv[:, :-2].reshape(n_head * head_dim, head_dim * n_head)
            k = qkv[:, [-2]].reshape(n_head_kv * head_dim, head_dim * n_head)
            v = qkv[:, [-1]].reshape(n_head_kv * head_dim, head_dim * n_head)
            data_torch = torch.cat((q, k, v)).reshape_as(data_torch)

        return [(self.map_tensor_name(name), data_torch)]
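    # Illustration (hypothetical sizes n_head = 4, n_head_kv = 2): the fused
    # tensor is viewed as (2, 4, head_dim, hidden), i.e. two kv groups each
    # holding [q, q, k, v] head blocks. Row-wise, the input layout
    #   [q0 q1 k0 v0 | q2 q3 k1 v1]
    # becomes, after the slice / reshape / cat,
    #   [q0 q1 q2 q3 | k0 k1 | v0 v1]
    # with all query heads first, then all keys, then all values.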


@ModelBase.register("GPTBigCodeForCausalLM")
class StarCoderModel(TextModel):
    model_arch = gguf.MODEL_ARCH.STARCODER

    def set_gguf_parameters(self):
        block_count = self.hparams["n_layer"]

        self.gguf_writer.add_context_length(self.hparams["n_positions"])
        self.gguf_writer.add_embedding_length(self.hparams["n_embd"])
        self.gguf_writer.add_feed_forward_length(4 * self.hparams["n_embd"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_head_count(self.hparams["n_head"])
        self.gguf_writer.add_head_count_kv(1)
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)


@ModelBase.register("GPTRefactForCausalLM")
class RefactModel(TextModel):
    model_arch = gguf.MODEL_ARCH.REFACT

    def set_vocab(self):
        super().set_vocab()

        # TODO: how to determine special FIM tokens automatically?
        special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=False,
                                          special_token_types = ['prefix', 'suffix', 'middle', 'eot'])
        special_vocab._set_special_token("prefix", 1)
        special_vocab._set_special_token("suffix", 3)
        special_vocab._set_special_token("middle", 2)
        special_vocab.chat_template = None  # do not add it twice
        special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        hidden_dim = self.hparams["n_embd"]
        inner_dim = 4 * hidden_dim
        hidden_dim = int(2 * inner_dim / 3)
        multiple_of = 256
        ff_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of)
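        # Worked example (hypothetical n_embd = 4096): inner_dim = 16384,
        # hidden_dim = int(2 * 16384 / 3) = 10922, and ff_dim rounds that up to
        # the next multiple of 256, i.e. 11008 -- the LLaMA-style
        # "2/3 of 4h, rounded up" FFN sizing.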
        block_count = self.hparams["n_layer"]

        # Refact uses ALiBi, so the context length below comes from config.json
        # (n_positions) and may reflect the training configuration.
        self.gguf_writer.add_context_length(self.hparams["n_positions"])
        self.gguf_writer.add_embedding_length(self.hparams["n_embd"])
        self.gguf_writer.add_feed_forward_length(ff_dim)
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_head_count(self.hparams["n_head"])
        self.gguf_writer.add_head_count_kv(1)
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        hidden_dim = self.hparams["n_embd"]
        inner_dim = 4 * hidden_dim
        hidden_dim = int(2 * inner_dim / 3)
        multiple_of = 256
        ff_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of)
        n_head = self.hparams["n_head"]
        n_head_kv = 1
        head_dim = self.hparams["n_embd"] // n_head

        tensors: list[tuple[str, Tensor]] = []

        if bid is not None:
            if name == f"transformer.h.{bid}.attn.kv.weight":
                tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid), data_torch[:n_head_kv * head_dim]))
                tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid), data_torch[n_head_kv * head_dim:]))
            elif name == f"transformer.h.{bid}.attn.q.weight":
                tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid), data_torch))
            elif name == f"transformer.h.{bid}.mlp.gate_up_proj.weight":
                tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE, bid), data_torch[:ff_dim]))
                tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP, bid), data_torch[ff_dim:]))

        if len(tensors) == 0:
            tensors.append((self.map_tensor_name(name), data_torch))

        return tensors


@ModelBase.register("StableLmForCausalLM", "StableLMEpochForCausalLM", "LlavaStableLMEpochForCausalLM")
class StableLMModel(TextModel):
    model_arch = gguf.MODEL_ARCH.STABLELM

    def set_vocab(self):
        if (self.dir_model / "tokenizer.json").is_file():
            self._set_vocab_gpt2()
        else:
            # StableLM 2 1.6B used to have a vocab in a similar format to Qwen's vocab
            self._set_vocab_qwen()

    def set_gguf_parameters(self):
        hparams = self.hparams
        block_count = hparams["num_hidden_layers"]

        self.gguf_writer.add_context_length(hparams["max_position_embeddings"])
        self.gguf_writer.add_embedding_length(hparams["hidden_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
        rotary_factor = self.find_hparam(["partial_rotary_factor", "rope_pct"])
        self.gguf_writer.add_rope_dimension_count(int(rotary_factor * (hparams["hidden_size"] // hparams["num_attention_heads"])))
        self.gguf_writer.add_head_count(hparams["num_attention_heads"])
        self.gguf_writer.add_head_count_kv(hparams["num_key_value_heads"])
        self.gguf_writer.add_parallel_residual(hparams["use_parallel_residual"] if "use_parallel_residual" in hparams else True)
        self.gguf_writer.add_layer_norm_eps(self.find_hparam(["layer_norm_eps", "norm_eps"]))
        self.gguf_writer.add_file_type(self.ftype)

    _q_norms: list[dict[str, Tensor]] | None = None
    _k_norms: list[dict[str, Tensor]] | None = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        n_head = self.hparams["num_attention_heads"]
        n_kv_head = self.hparams["num_key_value_heads"]

        if name.find("q_layernorm.norms") != -1:
            assert bid is not None

            if self._q_norms is None:
                self._q_norms = [{} for _ in range(self.block_count)]

            self._q_norms[bid][name] = data_torch

            if len(self._q_norms[bid]) >= n_head:
                return self._stack_qk_norm(bid, n_head, self._q_norms[bid], "q_layernorm")
            else:
                return []

        if name.find("k_layernorm.norms") != -1:
            assert bid is not None

            if self._k_norms is None:
                self._k_norms = [{} for _ in range(self.block_count)]

            self._k_norms[bid][name] = data_torch

            if len(self._k_norms[bid]) >= n_kv_head:
                return self._stack_qk_norm(bid, n_kv_head, self._k_norms[bid], "k_layernorm")
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]

    def _stack_qk_norm(self, bid: int, n_head: int, norms: dict[str, Tensor], layer_name: str = "q_layernorm"):
        datas: list[Tensor] = []
        # extract the norms in order
        for xid in range(n_head):
            ename = f"model.layers.{bid}.self_attn.{layer_name}.norms.{xid}.weight"
            datas.append(norms[ename])
            del norms[ename]
        data_torch = torch.stack(datas, dim=0)

        merged_name = f"model.layers.{bid}.self_attn.{layer_name}.weight"
        new_name = self.map_tensor_name(merged_name)

        return [(new_name, data_torch)]
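    # Illustration (hypothetical sizes): each per-head norm tensor
    # "model.layers.{bid}.self_attn.q_layernorm.norms.{i}.weight" is 1-D of
    # shape (head_dim,); torch.stack over n_head of them yields a single
    # (n_head, head_dim) tensor stored under the merged name.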
    def prepare_tensors(self):
        super().prepare_tensors()

        if self._q_norms is not None or self._k_norms is not None:
            # flatten two `list[dict[str, Tensor]]` into a single `list[str]`
            norms = (
                [k for d in self._q_norms for k in d.keys()] if self._q_norms is not None else []
            ) + (
                [k for d in self._k_norms for k in d.keys()] if self._k_norms is not None else []
            )
            if len(norms) > 0:
                raise ValueError(f"Unprocessed norms: {norms}")


@ModelBase.register(
    "LLaMAForCausalLM",
    "LlamaForCausalLM",
    "MistralForCausalLM",
    "MixtralForCausalLM",
    "VLlama3ForCausalLM",
    "LlavaForConditionalGeneration",
    "VoxtralForConditionalGeneration",
    "LlamaModel")
class LlamaModel(TextModel):
    model_arch = gguf.MODEL_ARCH.LLAMA
    undo_permute = True

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # fix for SmolVLM2, missing `num_attention_heads` in config.json
        if self.hf_arch == "VLlama3ForCausalLM":
            self.hparams["num_attention_heads"] = self.hparams.get("num_attention_heads", 32)

    def _set_vocab_mistral(self):
        vocab = MistralVocab(self.dir_model)
        logger.info(
            f"Converting tokenizer {vocab.tokenizer_type} of size {vocab.vocab_size}."
        )

        self.gguf_writer.add_tokenizer_model(vocab.gguf_tokenizer_model)

        tokens = []
        scores = []
        toktypes = []

        for text, score, toktype in vocab.all_tokens():
            tokens.append(text)
            scores.append(score)
            toktypes.append(toktype)

        assert len(tokens) == vocab.vocab_size, (
            f"token count ({len(tokens)}) != vocab size ({vocab.vocab_size})"
        )

        if vocab.tokenizer_type == MistralTokenizerType.tekken:
            self.gguf_writer.add_tokenizer_pre("tekken")
            self.gguf_writer.add_token_merges(
                vocab.extract_vocab_merges_from_model()
            )

        logger.info(
            f"Setting bos, eos, unk and pad token IDs to {vocab.bos_id}, {vocab.eos_id}, {vocab.unk_id}, {vocab.pad_id}."
        )

        self.gguf_writer.add_bos_token_id(vocab.bos_id)
        self.gguf_writer.add_eos_token_id(vocab.eos_id)
        self.gguf_writer.add_unk_token_id(vocab.unk_id)
        self.gguf_writer.add_pad_token_id(vocab.pad_id)

        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)
        self.gguf_writer.add_vocab_size(vocab.vocab_size)

        self.gguf_writer.add_add_bos_token(True)
        self.gguf_writer.add_add_eos_token(False)

        template_dir = Path(__file__).parent / "models/templates/"

        if not self.is_mistral_format or not self.disable_mistral_community_chat_template:
            # Log only for Mistral format that the official tokenization and detokenization is via `mistral-common`.
            if self.is_mistral_format:
                logger.info(
                    "Using a Mistral community chat template. These templates can be subject to errors "
                    "in the first days or weeks after a release. Mistral recommends using `mistral-common` "
                    "for tokenization and detokenization."
                )
            template = MistralModel.get_community_chat_template(vocab, template_dir, self.is_mistral_format)
            self.gguf_writer.add_chat_template(template)
        else:
            logger.info("Not using a Mistral community chat template. Make sure to perform tokenization and detokenization via `mistral-common`.")
    def set_vocab(self):
        if self.is_mistral_format:
            return self._set_vocab_mistral()

        path_tekken_json = self.dir_model / "tekken.json"
        path_tokenizer_json = self.dir_model / "tokenizer.json"
        if path_tekken_json.is_file() and not path_tokenizer_json.is_file():
            return self._set_vocab_mistral()

        try:
            self._set_vocab_sentencepiece()
        except FileNotFoundError:
            try:
                self._set_vocab_llama_hf()
            except (FileNotFoundError, TypeError):
                # Llama 3
                self._set_vocab_gpt2()

        # Apply to CodeLlama only (and ignore for Llama 3 with a vocab size of 128256)
        if self.hparams.get("vocab_size", 32000) == 32016:
            special_vocab = gguf.SpecialVocab(
                self.dir_model, load_merges=False,
                special_token_types = ['prefix', 'suffix', 'middle', 'eot']
            )
            special_vocab._set_special_token("prefix", 32007)
            special_vocab._set_special_token("suffix", 32008)
            special_vocab._set_special_token("middle", 32009)
            special_vocab._set_special_token("eot", 32010)
            special_vocab.add_to_gguf(self.gguf_writer)

        tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
        if tokenizer_config_file.is_file():
            with open(tokenizer_config_file, "r", encoding="utf-8") as f:
                tokenizer_config_json = json.load(f)
                if "add_prefix_space" in tokenizer_config_json:
                    self.gguf_writer.add_add_space_prefix(tokenizer_config_json["add_prefix_space"])

        # Apply to granite small models only
        if self.hparams.get("vocab_size", 32000) == 49152:
            self.gguf_writer.add_add_bos_token(False)
    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        hparams = self.hparams

        if not self.is_mistral_format:
            self.gguf_writer.add_vocab_size(hparams["vocab_size"])

        if (rope_dim := hparams.get("head_dim")) is None:
            rope_dim = hparams["hidden_size"] // hparams["num_attention_heads"]
        self.gguf_writer.add_rope_dimension_count(rope_dim)

        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "linear" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])

    @staticmethod
    def permute(weights: Tensor, n_head: int, n_head_kv: int | None):
        if n_head_kv is not None and n_head != n_head_kv:
            n_head = n_head_kv
        return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
                .swapaxes(1, 2)
                .reshape(weights.shape))
    _experts: list[dict[str, Tensor]] | None = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        n_head = self.find_hparam(["n_heads", "num_attention_heads"])
        n_kv_head = self.find_hparam(["n_kv_heads", "num_key_value_heads"])
        vision_prefixes = [
            "vision_encoder.",
            "vision_language_adapter.",
            "patch_merger.",
            "pre_mm_projector_norm",
        ]

        is_multimodal_tensor = "vision_tower" in name \
            or "vision_model" in name \
            or "audio_tower" in name \
            or "model.connector" in name \
            or "multi_modal_projector" in name \
            or any(
                name.startswith(prefix)
                for prefix in vision_prefixes
            )

        if is_multimodal_tensor:
            return []  # skip vision tensors
        elif self.hf_arch == "LlamaModel":
            name = "model." + name
        elif name.startswith("model.text_model"):
            name = name.replace("text_model.", "")  # for SmolVLM
        elif name.startswith("language_model."):
            name = name.replace("language_model.", "")  # for the rest

        if self.undo_permute:
            if name.endswith(("q_proj.weight", "q_proj.bias")):
                data_torch = LlamaModel.permute(data_torch, n_head, n_head)
            if name.endswith(("k_proj.weight", "k_proj.bias")):
                data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)

        # process the experts separately
        if name.find("block_sparse_moe.experts") != -1:
            n_experts = self.hparams["num_local_experts"]
            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for wid in ["w1", "w2", "w3"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.{wid}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)

                    merged_name = f"layers.{bid}.feed_forward.experts.{wid}.weight"

                    new_name = self.map_tensor_name(merged_name)

                    tensors.append((new_name, data_torch))
                return tensors
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]
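    # Illustration (hypothetical sizes): for a Mixtral-style block with
    # n_experts = 8, each expert contributes one 2-D weight per projection
    # (w1/w2/w3), e.g. of shape (n_ff, n_embd) for w1/w3; torch.stack over the
    # 8 tensors yields one (8, n_ff, n_embd) tensor per projection, which is
    # the fused *_exps layout the llama.cpp MoE graph expects.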
    def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
        if rope_scaling := self.find_hparam(["rope_scaling"], optional=True):
            if rope_scaling.get("rope_type", '').lower() == "llama3":
                base = self.hparams.get("rope_theta", 10000.0)
                if (dim := self.hparams.get("head_dim")) is None:
                    dim = self.hparams["hidden_size"] // self.hparams["num_attention_heads"]
                freqs = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim))

                factor = rope_scaling.get("factor", 8.0)
                low_freq_factor = rope_scaling.get("low_freq_factor", 1.0)
                high_freq_factor = rope_scaling.get("high_freq_factor", 4.0)
                old_context_len = self.hparams.get("original_max_position_embeddings", 8192)

                low_freq_wavelen = old_context_len / low_freq_factor
                high_freq_wavelen = old_context_len / high_freq_factor
                # assert low_freq_wavelen != high_freq_wavelen # Errors for Llama4

                rope_factors = []
                for freq in freqs:
                    wavelen = 2 * math.pi / freq
                    if wavelen < high_freq_wavelen:
                        rope_factors.append(1)
                    elif wavelen > low_freq_wavelen:
                        rope_factors.append(factor)
                    else:
                        smooth = (old_context_len / wavelen - low_freq_factor) / (high_freq_factor - low_freq_factor)
                        rope_factors.append(1 / ((1 - smooth) / factor + smooth))

                yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FREQS), torch.tensor(rope_factors, dtype=torch.float32))
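    # Illustration: with the defaults above (factor = 8.0, low_freq_factor = 1.0,
    # high_freq_factor = 4.0, old_context_len = 8192) the thresholds are
    # high_freq_wavelen = 2048 and low_freq_wavelen = 8192. Frequencies with a
    # wavelength under 2048 keep factor 1, those above 8192 get the full
    # factor 8, and the band in between is interpolated smoothly, per the
    # llama3 rope_type convention.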
    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")


@ModelBase.register("ArceeForCausalLM")
class ArceeModel(LlamaModel):
    model_arch = gguf.MODEL_ARCH.ARCEE

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self._try_set_pooling_type()
        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
            self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"])


@ModelBase.register(
    "LlavaForConditionalGeneration",  # pixtral
    "Mistral3ForConditionalGeneration",  # mistral small 3.1
)
class LlavaVisionModel(MmprojModel):
    img_break_tok_id = -1

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.hparams.get("model_type") == "pixtral":
            # layer_norm_eps is not in config.json, it is hard-coded in modeling_pixtral.py
            self.hparams["layer_norm_eps"] = self.hparams.get("layer_norm_eps", 1e-5)
            self.img_break_tok_id = self.get_token_id("[IMG_BREAK]")
        elif self.is_mistral_format:
            # hparams is already the vision config here, so norm_eps is only defined in global_config
            self.hparams["norm_eps"] = self.global_config.get("norm_eps", None)
            assert self.hparams["norm_eps"] is not None, "norm_eps not found in params.json"
            self.img_break_tok_id = self.find_vparam(["image_break_token_id"])
        else:
            raise ValueError(f"Unsupported model type: {self.hparams['model_type']}")
        logger.info(f"Image break token id: {self.img_break_tok_id}")

    def get_token_id(self, token: str) -> int:
        tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
        with open(tokenizer_config_file, "r", encoding="utf-8") as f:
            added_tokens_decoder = json.load(f)['added_tokens_decoder']
            for id_, token_data in added_tokens_decoder.items():
                if token_data["content"] == token:
                    return int(id_)
        raise ValueError(f"Token '{token}' not found in tokenizer config.")

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        hparams = self.hparams
        if hparams.get("model_type") == "pixtral":
            self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.PIXTRAL)
            self.gguf_writer.add_vision_attention_layernorm_eps(hparams["layer_norm_eps"])

            # hidden_act
            if hparams["hidden_act"] == "silu":
                self.gguf_writer.add_vision_use_silu(True)
            elif hparams["hidden_act"] == "gelu":
                self.gguf_writer.add_vision_use_gelu(True)
            else:
                raise ValueError(f"Unsupported hidden_act: {hparams['hidden_act']}")

        # spatial_merge_size
        if "spatial_merge_size" in self.global_config:
            self.gguf_writer.add_vision_spatial_merge_size(self.global_config["spatial_merge_size"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused
        n_head = (
            self.hparams["num_attention_heads"] if not self.is_mistral_format else self.find_vparam(["num_attention_heads"])
        )
        n_kv_head = n_head

        valid_prefixes = (
            "multi_modal_projector.",
            "vision_tower.",
            "vision_encoder.",
            "vision_language_adapter.",
            "patch_merger.",
            "pre_mm_projector_norm",
        )

        if any(name.startswith(prefix) for prefix in valid_prefixes):
            # process vision tensors
            if name.endswith(("q_proj.weight", "q_proj.bias")) and not self.is_mistral_format:
                data_torch = LlamaModel.permute(data_torch, n_head, n_head)
            if name.endswith(("k_proj.weight", "k_proj.bias")) and not self.is_mistral_format:
                data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)
            return [(self.map_tensor_name(name), data_torch)]

        embed_key = "embed_tokens.weight" if not self.is_mistral_format else "tok_embeddings.weight"
        if self.img_break_tok_id > 0 and embed_key in name:
            logger.info(f"Extracting [IMG_BREAK] token embedding from {name}")
            # for pixtral model, we need to extract the [IMG_BREAK] token embedding
            img_break_embd = data_torch[self.img_break_tok_id]
            name = gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.V_TOK_EMBD_IMG_BREAK]
            return [(self.map_tensor_name(name), img_break_embd)]

        return []  # skip other tensors


@ModelBase.register("Idefics3ForConditionalGeneration", "SmolVLMForConditionalGeneration")
class SmolVLMModel(MmprojModel):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.hparams["model_type"] == "smolvlm_vision":
            # fix for SmolVLM2, missing some keys in config.json
            # default values are taken from transformers code
            self.hparams["hidden_size"] = self.hparams.get("hidden_size", 1152)
            self.hparams["num_attention_heads"] = self.hparams.get("num_attention_heads", 16)
            self.hparams["intermediate_size"] = self.hparams.get("intermediate_size", 3072)

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.IDEFICS3)
        self.gguf_writer.add_vision_attention_layernorm_eps(self.hparams.get("layer_norm_eps", 1e-5))
        self.gguf_writer.add_vision_projector_scale_factor(self.global_config.get("scale_factor", 2))
        self.gguf_writer.add_vision_use_gelu(True)

    def tensor_force_quant(self, name, new_name, bid, n_dims):
        if ".embeddings." in name:
            return gguf.GGMLQuantizationType.F32
        return super().tensor_force_quant(name, new_name, bid, n_dims)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused
        is_vision_tensor = "vision_tower" in name or "vision_model" in name or "model.connector" in name

        if is_vision_tensor:
            return [(self.map_tensor_name(name), data_torch)]

        return []  # skip other tensors


@ModelBase.register(
    "Llama4ForConditionalGeneration",
    "Llama4ForCausalLM",
)
class Llama4Model(LlamaModel):
    model_arch = gguf.MODEL_ARCH.LLAMA4
    undo_permute = False

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # IMPORTANT: the normal "intermediate_size" is renamed to "intermediate_size_mlp", we need to undo this
        self.hparams["intermediate_size_moe"] = self.hparams["intermediate_size"]
        self.hparams["intermediate_size"] = self.hparams["intermediate_size_mlp"]

    def set_vocab(self):
        self._set_vocab_gpt2()

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_interleave_moe_layer_step(self.hparams["interleave_moe_layer_step"])
        self.gguf_writer.add_expert_feed_forward_length(self.hparams["intermediate_size_moe"])
        if "layer_types" in self.hparams:
            if all(lt == "full_attention" for lt in self.hparams["layer_types"]):
                # all layers are full attention (for MobileLLM), disable swa
                self.gguf_writer.add_sliding_window(0)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None):
        if name.startswith("language_model."):
            name = name.replace("language_model.", "")

        # split the gate_up into gate and up
        if "gate_up_proj" in name:
            name_up = name.replace("gate_up_proj", "up_proj.weight")
            name_gate = name.replace("gate_up_proj", "gate_proj.weight")
            dim_half = data_torch.shape[-1] // 2
            gate_proj_weight, up_proj_weight = data_torch.transpose(-1, -2).split(dim_half, dim=-2)
            return [
                (self.map_tensor_name(name_gate), gate_proj_weight),
                (self.map_tensor_name(name_up), up_proj_weight)
            ]

        if name.endswith("down_proj"):
            name += ".weight"
            data_torch = data_torch.transpose(-1, -2)

        if "multi_modal_projector" in name or "vision_model" in name:
            return []
        return super().modify_tensors(data_torch, name, bid)
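    # Shape sketch of the gate_up split above (hypothetical sizes): a fused
    # weight of shape (..., n_embd, 2 * n_ff) is transposed to
    # (..., 2 * n_ff, n_embd), then split along dim -2 at dim_half = n_ff into
    # a (..., n_ff, n_embd) gate_proj and a matching up_proj, emitted as two
    # separate tensors.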


@ModelBase.register("Llama4ForConditionalGeneration")
class Llama4VisionModel(MmprojModel):
    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.LLAMA4)
        self.gguf_writer.add_vision_attention_layernorm_eps(self.hparams["norm_eps"])
        self.gguf_writer.add_vision_projector_scale_factor(int(1.0 / self.hparams["pixel_shuffle_ratio"]))
        assert self.hparams["hidden_act"] == "gelu"
        self.gguf_writer.add_vision_use_gelu(True)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused
        if "multi_modal_projector" in name or "vision_model" in name:
            # process vision tensors
            if "positional_embedding_vlm" in name and ".weight" not in name:
                name += ".weight"
            if "multi_modal_projector.linear_1" in name:
                # despite the name with a number postfix, this is a single fully connected layer
                return [(gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.V_MMPROJ_FC] + '.weight', data_torch)]
            return [(self.map_tensor_name(name), data_torch)]
        return []


@ModelBase.register("Mistral3ForConditionalGeneration")
class Mistral3Model(LlamaModel):
    model_arch = gguf.MODEL_ARCH.LLAMA

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None):
        name = name.replace("language_model.", "")
        if "multi_modal_projector" in name or "vision_tower" in name:
            return []
        return super().modify_tensors(data_torch, name, bid)


@ModelBase.register("DeciLMForCausalLM")
class DeciModel(TextModel):
    model_arch = gguf.MODEL_ARCH.DECI

    @staticmethod
    def _ffn_mult_to_intermediate_size(ffn_mult: float, n_embd: int) -> int:
        # DeciLM-specific code
        intermediate_size = int(2 * ffn_mult * n_embd / 3)
        return DeciModel._find_multiple(intermediate_size, 256)

    @staticmethod
    def _find_multiple(n: int, k: int) -> int:
        # DeciLM-specific code
        if n % k == 0:
            return n
        return n + k - (n % k)
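    # Worked example (hypothetical ffn_mult = 1.3, n_embd = 4096):
    # int(2 * 1.3 * 4096 / 3) = 3549, which _find_multiple rounds up to the
    # next multiple of 256, i.e. 3584.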
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if "block_configs" in self.hparams:  # Llama-3_1-Nemotron-51B
            _block_configs: list[dict[str,Any]] = self.hparams["block_configs"]
            assert self.block_count == len(_block_configs)
            self._num_kv_heads = list()
            self._num_heads = list()
            _ffn_multipliers = list()
            # ***linear attention layer***
            # if n_heads_in_group is None and replace_with_linear is True,
            # then _num_kv_heads[il] is 0 and _num_heads[il] is num_attention_heads
            # ***attention-free layer***
            # if n_heads_in_group is None and replace_with_linear is False,
            # then _num_kv_heads[il] is 0 and _num_heads[il] is 0
            # ***normal attention layer***
            # if n_heads_in_group is not None, then
            # _num_kv_heads[il] is num_attention_heads // n_heads_in_group and
            # _num_heads[il] is num_attention_heads
            # ***dummy layer*** for nemotron 253B
            # if n_heads_in_group is None and ffn_mult is None,
            # then _num_kv_heads[il] is 0, _num_heads[il] is 0 and _ffn_dims is 0
            for il in range(len(_block_configs)):
                if _block_configs[il]["attention"]["n_heads_in_group"] is None:
                    if _block_configs[il]["attention"]["replace_with_linear"] is True:
                        self._num_kv_heads.append(0)
                        self._num_heads.append(self.hparams["num_attention_heads"])
                    else:
                        self._num_kv_heads.append(0)
                        self._num_heads.append(0)
                else:
                    self._num_kv_heads.append(self.hparams["num_attention_heads"] // _block_configs[il]["attention"]["n_heads_in_group"])
                    self._num_heads.append(self.hparams["num_attention_heads"])
                if _block_configs[il]["ffn"]["ffn_mult"] is None:  # dummy layer
                    _ffn_multipliers.append(0.0)
                else:
                    _ffn_multipliers.append(_block_configs[il]["ffn"]["ffn_mult"])
            assert self.block_count == len(self._num_kv_heads)
            assert self.block_count == len(self._num_heads)
            assert self.block_count == len(_ffn_multipliers)
            assert isinstance(self._num_kv_heads, list) and isinstance(self._num_kv_heads[0], int)
            assert isinstance(self._num_heads, list) and isinstance(self._num_heads[0], int)
            assert isinstance(_ffn_multipliers, list) and isinstance(_ffn_multipliers[0], float)
            self._ffn_dims: list[int] = [
                DeciModel._ffn_mult_to_intermediate_size(multiplier, self.hparams["hidden_size"])
                for multiplier in _ffn_multipliers
            ]
    def set_vocab(self):
        # Please change tokenizer_config.json of Llama-3_1-Nemotron-51B's
        # eos_token from '|eot_id|' to '|end_of_text|'
        if self.hparams.get("vocab_size", 128256) == 128256:
            tokens, toktypes, tokpre = self.get_vocab_base()
            self.gguf_writer.add_tokenizer_model("gpt2")
            self.gguf_writer.add_tokenizer_pre(tokpre)
            self.gguf_writer.add_token_list(tokens)
            self.gguf_writer.add_token_types(toktypes)

            special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=True)
            special_vocab.add_to_gguf(self.gguf_writer)
        else:
            # DeciLM-7B
            self._set_vocab_llama_hf()

    def set_gguf_parameters(self):
        if "block_configs" in self.hparams:  # Llama-3_1-Nemotron-51B
            assert self.block_count == len(self._num_kv_heads)
            assert self.block_count == len(self._num_heads)
            assert self.block_count == len(self._ffn_dims)
            if (rope_theta := self.hparams.get("rope_theta")) is not None:
                self.gguf_writer.add_rope_freq_base(rope_theta)
            self.gguf_writer.add_head_count_kv(self._num_kv_heads)
            self.gguf_writer.add_head_count(self._num_heads)
            self.gguf_writer.add_feed_forward_length(self._ffn_dims)
            self.gguf_writer.add_block_count(self.block_count)
            self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"])
            self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
            self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
            self.gguf_writer.add_key_length(self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
            self.gguf_writer.add_value_length(self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
            self.gguf_writer.add_file_type(self.ftype)
        else:  # DeciLM-7B
            super().set_gguf_parameters()
            if "num_key_value_heads_per_layer" in self.hparams:  # DeciLM-7B
                self._num_kv_heads: list[int] = self.hparams["num_key_value_heads_per_layer"]
                assert self.block_count == len(self._num_kv_heads)
                self.gguf_writer.add_head_count_kv(self._num_kv_heads)
        hparams = self.hparams
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])

        if (rope_dim := hparams.get("head_dim")) is None:
            rope_dim = hparams["hidden_size"] // hparams["num_attention_heads"]
        self.gguf_writer.add_rope_dimension_count(rope_dim)

        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "linear" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
    @staticmethod
    def permute(weights: Tensor, n_head: int, n_head_kv: int | None):
        if n_head_kv is not None and n_head != n_head_kv:
            n_head = n_head_kv
        return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
                .swapaxes(1, 2)
                .reshape(weights.shape))

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        n_head = self.hparams["num_attention_heads"]
        if bid is not None:
            if "num_key_value_heads_per_layer" in self.hparams:
                n_kv_head = self.hparams["num_key_value_heads_per_layer"][bid]
            elif "block_configs" in self.hparams:
                n_kv_head = self._num_kv_heads[bid]
                n_head = self._num_heads[bid]
            else:
                n_kv_head = self.hparams.get("num_key_value_heads")
        else:
            n_kv_head = self.hparams.get("num_key_value_heads")
        if name.endswith(("q_proj.weight", "q_proj.bias")):
            data_torch = DeciModel.permute(data_torch, n_head, n_head)
        if name.endswith(("k_proj.weight", "k_proj.bias")):
            data_torch = DeciModel.permute(data_torch, n_head, n_kv_head)
        return [(self.map_tensor_name(name), data_torch)]

    def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
        if rope_scaling := self.find_hparam(["rope_scaling"], optional=True):
            if rope_scaling.get("rope_type", '').lower() == "llama3":
                base = self.hparams.get("rope_theta", 10000.0)
                if (dim := self.hparams.get("head_dim")) is None:
                    dim = self.hparams["hidden_size"] // self.hparams["num_attention_heads"]
                freqs = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim))

                factor = rope_scaling.get("factor", 8.0)
                low_freq_factor = rope_scaling.get("low_freq_factor", 1.0)
                high_freq_factor = rope_scaling.get("high_freq_factor", 4.0)
                old_context_len = self.hparams.get("original_max_position_embeddings", 8192)

                low_freq_wavelen = old_context_len / low_freq_factor
                high_freq_wavelen = old_context_len / high_freq_factor
                assert low_freq_wavelen != high_freq_wavelen

                rope_factors = []
                for freq in freqs:
                    wavelen = 2 * math.pi / freq
                    if wavelen < high_freq_wavelen:
                        rope_factors.append(1)
                    elif wavelen > low_freq_wavelen:
                        rope_factors.append(factor)
                    else:
                        smooth = (old_context_len / wavelen - low_freq_factor) / (high_freq_factor - low_freq_factor)
                        rope_factors.append(1 / ((1 - smooth) / factor + smooth))

                yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FREQS), torch.tensor(rope_factors, dtype=torch.float32))

    def prepare_tensors(self):
        super().prepare_tensors()


@ModelBase.register("BitnetForCausalLM")
class BitnetModel(TextModel):
    model_arch = gguf.MODEL_ARCH.BITNET

    def set_vocab(self):
        self._set_vocab_sentencepiece()

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
        self.gguf_writer.add_rope_scaling_factor(1.0)

    def weight_quant(self, weight: Tensor) -> Tensor:
        dtype = weight.dtype
        weight = weight.float()
        scale = weight.abs().mean().clamp(min=1e-5)
        iscale = 1 / scale
        # TODO: multiply by the scale directly instead of inverting it twice
        # (this is also unnecessarily doubly inverted upstream)
        # ref: https://huggingface.co/1bitLLM/bitnet_b1_58-3B/blob/af89e318d78a70802061246bf037199d2fb97020/utils_quant.py#L10
        result = (weight * iscale).round().clamp(-1, 1) / iscale
        return result.type(dtype)
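    # Worked example (hypothetical values): for weight = [0.4, -0.05, 1.2],
    # scale = mean(|w|) = 0.55, so w / scale = [0.727, -0.091, 2.182]
    # -> round -> [1, 0, 2] -> clamp -> [1, 0, 1]; dividing by iscale
    # (i.e. multiplying by scale) gives [0.55, 0.0, 0.55]: every weight
    # ends up as scale * {-1, 0, 1}.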
    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        new_name = self.map_tensor_name(name)

        if any(self.match_model_tensor_name(new_name, key, bid) for key in [
            gguf.MODEL_TENSOR.ATTN_Q,
            gguf.MODEL_TENSOR.ATTN_K,
            gguf.MODEL_TENSOR.ATTN_V,
            gguf.MODEL_TENSOR.ATTN_OUT,
            gguf.MODEL_TENSOR.FFN_UP,
            gguf.MODEL_TENSOR.FFN_DOWN,
            gguf.MODEL_TENSOR.FFN_GATE,
        ]):
            # transform weight into 1/0/-1 (in fp32)
            data_torch = self.weight_quant(data_torch)

        yield (new_name, data_torch)


@ModelBase.register("GrokForCausalLM", "Grok1ForCausalLM")
class GrokModel(TextModel):
    model_arch = gguf.MODEL_ARCH.GROK

    def set_vocab(self):
        if (self.dir_model / 'tokenizer.model').is_file():
            self._set_vocab_sentencepiece()
            return

        if not (self.dir_model / 'tokenizer.json').is_file() or not (self.dir_model / 'chat_template.jinja').is_file():
            logger.error('Error: Missing vocab and chat template, download files from https://huggingface.co/alvarobartt/grok-2-tokenizer')
            sys.exit(1)

        self._set_vocab_gpt2()

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def set_gguf_parameters(self):
        super().set_gguf_parameters()

        self.gguf_writer.add_attn_logit_softcapping(self.hparams.get("attn_logit_softcapping", 30.0))
        self.gguf_writer.add_router_logit_softcapping(self.hparams.get("router_logit_softcapping", 30.0))
        if (final_logit_softcap := self.hparams.get("final_logit_softcapping")):
            self.gguf_writer.add_final_logit_softcapping(final_logit_softcap)

        if (rope_dim := self.hparams.get("head_dim")) is None:
            rope_dim = self.hparams["hidden_size"] // self.hparams["num_attention_heads"]

        if (moe_intermediate_size := self.hparams.get("moe_intermediate_size")) is not None:
            self.gguf_writer.add_expert_feed_forward_length(moe_intermediate_size)

        # Treat "original" as "yarn", seems to have been a mistake
        if self.hparams.get("rope_type") in ("yarn", "original"):
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
            self.gguf_writer.add_rope_scaling_factor(self.hparams["scaling_factor"])
            self.gguf_writer.add_rope_scaling_orig_ctx_len(self.hparams["original_max_position_embeddings"])
            self.gguf_writer.add_rope_scaling_yarn_ext_factor(self.hparams["extrapolation_factor"])
            self.gguf_writer.add_rope_scaling_yarn_attn_factor(self.hparams["attn_factor"])
            self.gguf_writer.add_rope_scaling_yarn_beta_fast(self.hparams["beta_fast"])
            self.gguf_writer.add_rope_scaling_yarn_beta_slow(self.hparams["beta_slow"])

        if temp_len := self.hparams.get("attn_temperature_len"):
            self.gguf_writer.add_attn_temperature_length(temp_len)

        self.gguf_writer.add_attn_output_scale(self.hparams.get("attn_output_multiplier", rope_dim**-0.5))
        self.gguf_writer.add_embedding_scale(self.hparams["embedding_multiplier_scale"])
        self.gguf_writer.add_logit_scale(self.hparams["output_multiplier_scale"])

    _experts: list[dict[str, list[Tensor]]] | None = None
    _cur_expert = ""

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        tensors: list[tuple[str, Tensor]] = []
        is_expert = ".moe." in name or ".block_sparse_moe.experts." in name

        if not is_expert:
            tensors.append((self.map_tensor_name(name), data_torch))

        # process the experts separately
        if is_expert or self._cur_expert:
            n_experts = self.hparams["num_local_experts"]
            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            # concatenate split tensors
            if name in self._experts[bid]:
                self._cur_expert = name
                self._experts[bid][name].append(data_torch)
                return []
            elif is_expert:
                self._cur_expert = name
                self._experts[bid][name] = [data_torch]
                return []
            else:
                self._cur_expert = ""

            for bid in range(self.block_count):
                if len(self._experts[bid]) >= n_experts * 3:
                    # merge the experts into a single 3d tensor
                    for wid in [("linear", "w1", 0), ("linear_1", "w2", 1), ("linear_v", "w3", 0)]:
                        datas: list[Tensor] = []

                        for xid in range(n_experts):
                            ename = f"transformer.decoder_layer.{bid}.moe.{xid}.{wid[0]}.weight"
                            if ename not in self._experts[bid]:
                                ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.{wid[1]}.weight"
                            tensor_list = self._experts[bid][ename]
                            datas.append(torch.cat(tensor_list, dim=wid[2]) if len(tensor_list) > 1 else tensor_list[0])
                            del self._experts[bid][ename]

                        data_torch = torch.stack(datas, dim=0)

                        merged_name = f"transformer.decoder_layer.{bid}.moe.{wid[0]}.weight"

                        new_name = self.map_tensor_name(merged_name)

                        yield (new_name, data_torch)

        yield from tensors


@ModelBase.register("DbrxForCausalLM")
class DbrxModel(TextModel):
    model_arch = gguf.MODEL_ARCH.DBRX

    def set_gguf_parameters(self):
        ffn_config = self.hparams["ffn_config"]
        attn_config = self.hparams["attn_config"]
        self.gguf_writer.add_block_count(self.hparams["n_layers"])

        self.gguf_writer.add_context_length(self.hparams["max_seq_len"])
        self.gguf_writer.add_embedding_length(self.hparams["d_model"])
        self.gguf_writer.add_feed_forward_length(ffn_config["ffn_hidden_size"])

        self.gguf_writer.add_head_count(self.hparams["n_heads"])
        self.gguf_writer.add_head_count_kv(attn_config["kv_n_heads"])

        self.gguf_writer.add_rope_freq_base(attn_config["rope_theta"])

        self.gguf_writer.add_clamp_kqv(attn_config["clip_qkv"])

        self.gguf_writer.add_expert_count(ffn_config["moe_num_experts"])
        self.gguf_writer.add_expert_used_count(ffn_config["moe_top_k"])

        self.gguf_writer.add_layer_norm_eps(1e-5)

        self.gguf_writer.add_file_type(self.ftype)
        logger.info(f"gguf: file type = {self.ftype}")

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        n_expert = self.hparams["ffn_config"]["moe_num_experts"]
        n_ff = self.hparams["ffn_config"]["ffn_hidden_size"]
        n_embd = self.hparams["d_model"]

        # Specific behavior for experts tensors: suffix .weight, view as 3D and transpose
        # original implementation expects (n_expert, n_ff, n_embd) for all experts weights
        # But llama.cpp moe graph works differently
        # AND the dimensions in ggml are typically in the reverse order of the pytorch dimensions
        # so (n_expert, n_ff, n_embd) in pytorch is {n_embd, n_ff, n_expert} in ggml_tensor
        exp_tensor_names = {"ffn.experts.mlp.w1": None,       # LLM_TENSOR_FFN_GATE_EXPS ggml_tensor->ne{n_embd, n_ff,   n_expert}
                            "ffn.experts.mlp.w2": (0, 2, 1),  # LLM_TENSOR_FFN_DOWN_EXPS ggml_tensor->ne{n_ff,   n_embd, n_expert}
                            "ffn.experts.mlp.v1": None}       # LLM_TENSOR_FFN_UP_EXPS   ggml_tensor->ne{n_embd, n_ff,   n_expert}
        experts = False

        for exp_tensor_name in exp_tensor_names.keys():
            if name.find(exp_tensor_name) != -1 and name.find(".weight") == -1:
                experts = True
                data_torch = data_torch.view(n_expert, n_ff, n_embd)
                if (permute_tensor := exp_tensor_names[exp_tensor_name]) is not None:
                    data_torch = data_torch.permute(*permute_tensor)
                break

        # map tensor names
        # In MoE models the ffn tensors are typically most of the model weights,
        # and need to be quantizable. Quantize expects tensor names to be suffixed by .weight.
        # Every other model has weight names ending in .weight, so we assume that
        # convention and append the suffix here, since dbrx does not follow it:
        # https://huggingface.co/databricks/dbrx-instruct/blob/main/model.safetensors.index.json#L15
        new_name = self.map_tensor_name(name if not experts else name + ".weight", try_suffixes=(".weight",))

        return [(new_name, data_torch)]

    def tensor_force_quant(self, name: str, new_name: str, bid: int | None, n_dims: int) -> gguf.GGMLQuantizationType | bool:
        del name, new_name, bid  # unused

        return n_dims > 1
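# Shape sketch of the expert handling above (hypothetical sizes): a fused dbrx
# expert tensor (stored without a .weight suffix) is viewed as
# (n_expert, n_ff, n_embd); w2 is additionally permuted with (0, 2, 1) to
# (n_expert, n_embd, n_ff), so that in ggml's reversed dimension order the
# gate/up tensors come out as {n_embd, n_ff, n_expert} and down as
# {n_ff, n_embd, n_expert}.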


@ModelBase.register("MiniCPMForCausalLM")
class MiniCPMModel(TextModel):
    model_arch = gguf.MODEL_ARCH.MINICPM

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        embedding_scale = float(self.hparams["scale_emb"])
        self.gguf_writer.add_embedding_scale(embedding_scale)
        logger.info(f"gguf: (minicpm) embedding_scale = {embedding_scale}")
        residual_scale = self.hparams["scale_depth"] / self.hparams["num_hidden_layers"] ** 0.5
        self.gguf_writer.add_residual_scale(residual_scale)
        logger.info(f"gguf: (minicpm) residual_scale = {residual_scale}")
        logit_scale = self.hparams["hidden_size"] / self.hparams["dim_model_base"]
        self.gguf_writer.add_logit_scale(logit_scale)
        logger.info(f"gguf: (minicpm) logit_scale = {logit_scale}")
        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "longrope":
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LONGROPE)
            logger.info(f"gguf: (minicpm) rope_scaling_type = {gguf.RopeScalingType.LONGROPE}")

    def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
        rope_dims = self.hparams["hidden_size"] // self.hparams["num_attention_heads"]

        rope_scaling = self.find_hparam(['rope_scaling'], True)
        if rope_scaling is not None:
            long_factors = rope_scaling.get('long_factor', None)
            short_factors = rope_scaling.get('short_factor', None)

            if long_factors is None or short_factors is None:
                raise KeyError('Missing the required key rope_scaling.long_factor or rope_scaling.short_factor')

            if len(long_factors) != len(short_factors) or len(long_factors) != rope_dims / 2:
                raise ValueError(f'The length of rope long and short factors must be {rope_dims / 2}')

            yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FACTORS_LONG), torch.tensor(long_factors, dtype=torch.float32))
            yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FACTORS_SHORT), torch.tensor(short_factors, dtype=torch.float32))

    def set_vocab(self):
        self._set_vocab_sentencepiece()

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        n_head = self.hparams["num_attention_heads"]
        n_kv_head = self.hparams.get("num_key_value_heads")

        # HF models permute some of the tensors, so we need to undo that
        if name.endswith("q_proj.weight"):
            data_torch = LlamaModel.permute(data_torch, n_head, n_head)
        if name.endswith("k_proj.weight"):
            data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)

        return [(self.map_tensor_name(name), data_torch)]


@ModelBase.register("MiniCPM3ForCausalLM")
class MiniCPM3Model(TextModel):
    model_arch = gguf.MODEL_ARCH.MINICPM3

    def set_gguf_parameters(self):
        hparams = self.hparams

        self.gguf_writer.add_file_type(self.ftype)
        self.gguf_writer.add_context_length(hparams["max_position_embeddings"])
        self.gguf_writer.add_embedding_length(hparams["hidden_size"])
        self.gguf_writer.add_block_count(self.block_count)
        self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
        self.gguf_writer.add_head_count(hparams["num_attention_heads"])
        self.gguf_writer.add_head_count_kv(hparams["num_key_value_heads"])
        self.gguf_writer.add_layer_norm_rms_eps(hparams["rms_norm_eps"])
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])
        if "q_lora_rank" in hparams and hparams["q_lora_rank"] is not None:
            self.gguf_writer.add_q_lora_rank(hparams["q_lora_rank"])
        self.gguf_writer.add_kv_lora_rank(hparams["kv_lora_rank"])
        self.gguf_writer.add_key_length(hparams["qk_nope_head_dim"] + hparams["qk_rope_head_dim"])
        self.gguf_writer.add_rope_dimension_count(hparams["qk_rope_head_dim"])

    def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
        rope_scaling = self.find_hparam(['rope_scaling'], True)
        if rope_scaling is not None:
            rope_dims = self.hparams["qk_rope_head_dim"]

            long_factors = rope_scaling.get('long_factor', None)
            short_factors = rope_scaling.get('short_factor', None)

            if long_factors is None or short_factors is None:
                raise KeyError('Missing the required key rope_scaling.long_factor or rope_scaling.short_factor')

            if len(long_factors) != len(short_factors) or len(long_factors) != rope_dims / 2:
                raise ValueError(f'The length of rope long and short factors must be {rope_dims / 2}')

            yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FACTORS_LONG), torch.tensor(long_factors, dtype=torch.float32))
            yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FACTORS_SHORT), torch.tensor(short_factors, dtype=torch.float32))

    def set_vocab(self):
        self._set_vocab_sentencepiece()

    def _reverse_hf_permute(self, weights: Tensor, n_head: int, n_kv_head: int | None = None) -> Tensor:
        if n_kv_head is not None and n_head != n_kv_head:
            n_head //= n_kv_head

        return (
            weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
            .swapaxes(1, 2)
            .reshape(weights.shape)
        )
  2456. @ModelBase.register("QWenLMHeadModel")
  2457. class QwenModel(TextModel):
  2458. model_arch = gguf.MODEL_ARCH.QWEN
  2459. @staticmethod
  2460. def token_bytes_to_string(b):
  2461. from transformers.models.gpt2.tokenization_gpt2 import bytes_to_unicode
  2462. byte_encoder = bytes_to_unicode()
  2463. return ''.join([byte_encoder[ord(char)] for char in b.decode('latin-1')])

    @staticmethod
    def bpe(mergeable_ranks: dict[bytes, int], token: bytes, max_rank: int | None = None) -> list[bytes]:
        parts = [bytes([b]) for b in token]
        while True:
            min_idx = None
            min_rank = None
            for i, pair in enumerate(zip(parts[:-1], parts[1:])):
                rank = mergeable_ranks.get(pair[0] + pair[1])
                if rank is not None and (min_rank is None or rank < min_rank):
                    min_idx = i
                    min_rank = rank
            if min_rank is None or (max_rank is not None and min_rank >= max_rank):
                break
            assert min_idx is not None
            parts = parts[:min_idx] + [parts[min_idx] + parts[min_idx + 1]] + parts[min_idx + 2:]
        return parts
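
    # Usage sketch with toy merge ranks (hypothetical values): the lowest-ranked
    # adjacent pair is merged first, and max_rank stops merges at that rank.
    #
    #   ranks = {b"ab": 0, b"abc": 1}
    #   QwenModel.bpe(ranks, b"abcd")              # -> [b"abc", b"d"]
    #   QwenModel.bpe(ranks, b"abcd", max_rank=1)  # -> [b"ab", b"c", b"d"]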

    def set_vocab(self):
        self._set_vocab_qwen()

    def set_gguf_parameters(self):
        self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"])
        self.gguf_writer.add_block_count(self.hparams["num_hidden_layers"])
        self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
        self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
        self.gguf_writer.add_rope_freq_base(self.hparams["rotary_emb_base"])
        self.gguf_writer.add_rope_dimension_count(self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
        self.gguf_writer.add_head_count(self.hparams["num_attention_heads"])
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)


@ModelBase.register("Qwen2Model", "Qwen2ForCausalLM", "Qwen2AudioForConditionalGeneration")
class Qwen2Model(TextModel):
    model_arch = gguf.MODEL_ARCH.QWEN2

    def set_vocab(self):
        try:
            self._set_vocab_sentencepiece()
        except FileNotFoundError:
            self._set_vocab_gpt2()

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self._try_set_pooling_type()
        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
            self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        if self.hf_arch == "Qwen2Model":
            name = f"model.{name}"  # map to Qwen2ForCausalLM tensors
        if "language_model." in name:
            name = name.replace("language_model.", "")  # for InternVL
        if name.startswith("mlp") or name.startswith("multi_modal_projector") \
                or name.startswith("vision_model") or name.startswith("audio_tower") \
                or name.startswith("model.vision_tower") or name.startswith("model.multi_modal_projector"):
            # skip vision and audio tensors
            return []
        yield from super().modify_tensors(data_torch, name, bid)


@ModelBase.register("DreamModel")
class DreamModel(TextModel):
    model_arch = gguf.MODEL_ARCH.DREAM

    def get_vocab_base(self) -> tuple[list[str], list[int], str]:
        tokens: list[str] = []
        toktypes: list[int] = []

        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(self.dir_model, trust_remote_code=True)

        vocab_dict = tokenizer.get_vocab()
        vocab_size = self.hparams.get("vocab_size", len(vocab_dict))
        assert max(vocab_dict.values()) < vocab_size

        tokpre = self.get_vocab_base_pre(tokenizer)

        reverse_vocab = {id_: encoded_tok for encoded_tok, id_ in vocab_dict.items()}
        added_vocab = tokenizer.get_added_vocab()

        for i in range(vocab_size):
            if i not in reverse_vocab:
                tokens.append(f"[PAD{i}]")
                toktypes.append(gguf.TokenType.UNUSED)
            elif reverse_vocab[i] in added_vocab:
                tokens.append(reverse_vocab[i])
                # Check if it's a special token - treat special tokens as CONTROL tokens
                if hasattr(tokenizer, 'added_tokens_decoder') and i in tokenizer.added_tokens_decoder:
                    if tokenizer.added_tokens_decoder[i].special:
                        toktypes.append(gguf.TokenType.CONTROL)
                    else:
                        toktypes.append(gguf.TokenType.USER_DEFINED)
                else:
                    # Fallback: treat all added vocab as control tokens for special tokens like <|im_start|>
                    toktypes.append(gguf.TokenType.CONTROL)
            else:
                tokens.append(reverse_vocab[i])
                toktypes.append(gguf.TokenType.NORMAL)

        return tokens, toktypes, tokpre

    def set_vocab(self):
        try:
            self._set_vocab_sentencepiece()
        except FileNotFoundError:
            self._set_vocab_gpt2()

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self._try_set_pooling_type()

        # Dream models use non-causal attention for diffusion
        self.gguf_writer.add_causal_attention(False)

        # Handle RoPE scaling similar to Qwen2
        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
            self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"])

        # Add Dream-specific parameters
        mask_token_id = self.hparams.get("mask_token_id")
        if mask_token_id is not None:
            self.gguf_writer.add_mask_token_id(mask_token_id)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # Dream model tensors should be mapped directly since it's the base model
        yield from super().modify_tensors(data_torch, name, bid)


@ModelBase.register("LLaDAModelLM")
class LLaDAModel(TextModel):
    model_arch = gguf.MODEL_ARCH.LLADA
    undo_permute = True

    def get_vocab_base(self) -> tuple[list[str], list[int], str]:
        tokens: list[str] = []
        toktypes: list[int] = []

        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(self.dir_model, trust_remote_code=True)

        vocab_dict = tokenizer.get_vocab()
        vocab_size = self.hparams.get("vocab_size", len(vocab_dict))
        assert max(vocab_dict.values()) < vocab_size

        tokpre = self.get_vocab_base_pre(tokenizer)

        reverse_vocab = {id_: encoded_tok for encoded_tok, id_ in vocab_dict.items()}
        added_vocab = tokenizer.get_added_vocab()

        for i in range(vocab_size):
            if i not in reverse_vocab:
                tokens.append(f"[PAD{i}]")
                toktypes.append(gguf.TokenType.UNUSED)
            elif reverse_vocab[i] in added_vocab:
                tokens.append(reverse_vocab[i])
                # Check if it's a special token - treat special tokens as CONTROL tokens
                if hasattr(tokenizer, 'added_tokens_decoder') and i in tokenizer.added_tokens_decoder:
                    if tokenizer.added_tokens_decoder[i].special:
                        toktypes.append(gguf.TokenType.CONTROL)
                    else:
                        toktypes.append(gguf.TokenType.USER_DEFINED)
                else:
                    # Fallback: treat all added vocab as control tokens for special tokens like <|im_start|>
                    toktypes.append(gguf.TokenType.CONTROL)
            else:
                tokens.append(reverse_vocab[i])
                toktypes.append(gguf.TokenType.NORMAL)

        return tokens, toktypes, tokpre

    def set_vocab(self):
        self._set_vocab_gpt2()

        # LLaDA specific parameters
        self.gguf_writer.add_add_bos_token(True)

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self._try_set_pooling_type()

        # Add parameters similar to LlamaModel
        hparams = self.hparams
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])

        if (rope_dim := hparams.get("head_dim")) is None:
            n_heads = hparams.get("num_attention_heads", hparams.get("n_heads"))
            rope_dim = hparams.get("hidden_size", hparams.get("d_model")) // n_heads
        self.gguf_writer.add_rope_dimension_count(rope_dim)

        # Set context length for LLaDA
        context_length = self.hparams.get("max_sequence_length", 4096)
        self.gguf_writer.add_context_length(context_length)

        # Set embedding length (dimension size)
        embedding_length = self.hparams.get("d_model", 4096)
        self.gguf_writer.add_embedding_length(embedding_length)

        # Set feed forward length (MLP hidden size)
        feed_forward_length = self.hparams.get("mlp_hidden_size", 12288)
        self.gguf_writer.add_feed_forward_length(feed_forward_length)

        # LLaDA models use non-causal attention for diffusion, similar to Dream
        self.gguf_writer.add_causal_attention(False)

        # LLaDA models don't shift their logits
        self.gguf_writer.add_diffusion_shift_logits(False)

    @staticmethod
    def permute(weights: Tensor, n_head: int, n_head_kv: int | None):
        if n_head_kv is not None and n_head != n_head_kv:
            n_head = n_head_kv
        return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
                .swapaxes(1, 2)
                .reshape(weights.shape))
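
    # A small sketch of what this permutation does (hypothetical 8x1 weight,
    # n_head=1): HF checkpoints store the two rotary halves of each head as
    # contiguous row blocks, and the reshape/swapaxes re-interleaves them:
    #
    #   w = torch.arange(8.0).reshape(8, 1)
    #   LLaDAModel.permute(w, 1, 1).flatten().tolist()
    #   # -> [0.0, 4.0, 1.0, 5.0, 2.0, 6.0, 3.0, 7.0]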

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        n_head = self.hparams.get("num_attention_heads", self.hparams.get("n_heads"))
        n_kv_head = self.hparams.get("num_key_value_heads", self.hparams.get("n_kv_heads"))

        if self.undo_permute:
            if name.endswith(("q_proj.weight", "q_proj.bias")):
                data_torch = LLaDAModel.permute(data_torch, n_head, n_head)
            if name.endswith(("k_proj.weight", "k_proj.bias")):
                data_torch = LLaDAModel.permute(data_torch, n_head, n_kv_head)

        # LLaDA model tensors should be mapped directly since it's the base model
        yield from super().modify_tensors(data_torch, name, bid)


@ModelBase.register("Ernie4_5_ForCausalLM", "Ernie4_5ForCausalLM")
class Ernie4_5Model(TextModel):
    model_arch = gguf.MODEL_ARCH.ERNIE4_5

    def set_vocab(self):
        self._set_vocab_sentencepiece()

    def set_gguf_parameters(self):
        super().set_gguf_parameters()

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        num_heads = self.hparams["num_attention_heads"]
        num_kv_heads = self.hparams["num_key_value_heads"]
        if (head_dim := self.hparams.get("head_dim")) is None:
            head_dim = self.hparams["hidden_size"] // num_heads

        if "ernie." in name:
            name = name.replace("ernie.", "model.")
        # split the qkv weights
        # qkv_proj shape: [(num_heads + 2 * num_kv_heads) * head_dim, hidden_size]
        if "qkv_proj" in name:
            name_q = name.replace("qkv_proj.weight", "q_proj.weight")
            name_k = name.replace("qkv_proj.weight", "k_proj.weight")
            name_v = name.replace("qkv_proj.weight", "v_proj.weight")
            total_q_dim = num_heads * head_dim
            total_k_dim = num_kv_heads * head_dim
            total_v_dim = num_kv_heads * head_dim
            q_proj_weight, k_proj_weight, v_proj_weight = data_torch.split([total_q_dim, total_k_dim, total_v_dim], dim=0)
            return [
                (self.map_tensor_name(name_q), q_proj_weight),
                (self.map_tensor_name(name_k), k_proj_weight),
                (self.map_tensor_name(name_v), v_proj_weight)
            ]
        # split the up_gate_proj into gate and up
        # up_gate_proj shape: [2 * intermediate_size, hidden_size]
        if "up_gate_proj" in name:
            name_up = name.replace("up_gate_proj.weight", "up_proj.weight")
            name_gate = name.replace("up_gate_proj.weight", "gate_proj.weight")
            dim_half = data_torch.shape[0] // 2
            gate_proj_weight, up_proj_weight = data_torch.split(dim_half, dim=0)
            return [
                (self.map_tensor_name(name_gate), gate_proj_weight),
                (self.map_tensor_name(name_up), up_proj_weight)
            ]
        return [(self.map_tensor_name(name), data_torch)]
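
    # Split-size sketch with hypothetical shapes: for num_heads=16,
    # num_kv_heads=4, head_dim=64 the fused qkv_proj has
    # (16 + 2 * 4) * 64 = 1536 rows, and split([1024, 256, 256], dim=0)
    # recovers Q, K and V; likewise a [2 * intermediate_size, hidden_size]
    # up_gate_proj splits into two equal halves, gate first, then up.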


@ModelBase.register("Ernie4_5_MoeForCausalLM")
class Ernie4_5MoeModel(Ernie4_5Model):
    model_arch = gguf.MODEL_ARCH.ERNIE4_5_MOE
    _experts: list[dict[str, Tensor]] | None = None

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._experts = [{} for _ in range(self.block_count)]

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_expert_count(self.hparams["moe_num_experts"])
        self.gguf_writer.add_expert_used_count(self.hparams["moe_k"])
        self.gguf_writer.add_interleave_moe_layer_step(self.hparams["moe_layer_interval"])
        self.gguf_writer.add_leading_dense_block_count(self.hparams["moe_layer_start_index"])
        if (moe_intermediate_size := self.hparams.get("moe_intermediate_size")) is not None:
            self.gguf_writer.add_expert_feed_forward_length(moe_intermediate_size)
        if (shared_expert_count := self.hparams.get('moe_num_shared_experts')) is not None:
            self.gguf_writer.add_expert_shared_count(shared_expert_count)
            if shared_expert_count > 0 and (shared_expert_intermediate_size := self.hparams.get('intermediate_size')) is not None and (num_key_value_heads := self.hparams.get('num_key_value_heads')) is not None:
                self.gguf_writer.add_expert_shared_feed_forward_length(shared_expert_intermediate_size // num_key_value_heads)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # Modify correction bias name as in DeepseekV2
        if name.endswith("e_score_correction_bias"):
            name = name.replace("e_score_correction_bias", "e_score_correction.bias")

        # skip all Multi-Token Prediction (MTP) tensors for now (again, same as DeepseekV2)
        if re.match(r"model\.mtp_block\.(\d+)", name) \
                or re.match(r"model\.mtp_emb_norm\.(\d+)", name) \
                or re.match(r"model\.mtp_hidden_norm\.(\d+)", name) \
                or re.match(r"model\.mtp_linear_proj\.(\d+)", name):
            return []

        # process the experts separately
        if name.find("mlp.experts") != -1:
            n_experts = self.hparams["moe_num_experts"]
            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for w_name in ["gate_proj", "up_proj", "down_proj"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename_to_retrieve = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
                        datas.append(self._experts[bid][ename_to_retrieve])
                        del self._experts[bid][ename_to_retrieve]

                    data_torch = torch.stack(datas, dim=0)
                    merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"
                    new_name = self.map_tensor_name(merged_name)
                    tensors.append((new_name, data_torch))

                return tensors
            else:
                return []
        return [(self.map_tensor_name(name), data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()
        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")


@ModelBase.register(
    "Qwen2VLModel",
    "Qwen2VLForConditionalGeneration",
    "Qwen2_5_VLForConditionalGeneration",
    "Qwen2_5OmniModel",
)
class Qwen2VLModel(TextModel):
    model_arch = gguf.MODEL_ARCH.QWEN2VL

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        mrope_section = self.hparams["rope_scaling"]["mrope_section"]
        mrope_section += [0] * max(0, 4 - len(mrope_section))
        self.gguf_writer.add_rope_dimension_sections(mrope_section)
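
    # Padding sketch (hypothetical config value): a 3-entry mrope_section such
    # as [16, 24, 24] is right-padded with zeros to length 4 before being
    # written, i.e. [16, 24, 24, 0].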

    def set_vocab(self):
        try:
            self._set_vocab_sentencepiece()
        except FileNotFoundError:
            self._set_vocab_gpt2()

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused
        if name.startswith("thinker."):
            name = name.replace("thinker.", "")
        if name.startswith("visual") or name.startswith("audio") or \
                name.startswith("talker") or name.startswith("token2wav"):
            # skip multimodal tensors
            return []
        return [(self.map_tensor_name(name), data_torch)]


@ModelBase.register("Qwen2VLModel", "Qwen2VLForConditionalGeneration", "Qwen2_5_VLForConditionalGeneration")
class Qwen2VLVisionModel(MmprojModel):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        assert self.hparams_vision is not None
        self.hparams_vision["image_size"] = self.hparams_vision.get("image_size", 560)
        # rename config.json values
        self.hparams_vision["num_attention_heads"] = self.hparams_vision.get("num_heads")
        self.hparams_vision["num_hidden_layers"] = self.hparams_vision.get("depth")
        if "embed_dim" in self.hparams_vision:  # qwen2vl
            self.hparams_vision["intermediate_size"] = self.hparams_vision.get("hidden_size")
            self.hparams_vision["hidden_size"] = self.hparams_vision.get("embed_dim")

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        assert self.hparams_vision is not None
        hparams = self.hparams_vision
        model_type = self.global_config['model_type']
        if model_type == 'qwen2_vl':
            self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.QWEN2VL)
        elif model_type == 'qwen2_5_vl' or model_type == 'qwen2_5_omni':
            if model_type == 'qwen2_5_omni':
                self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.QWEN25O)
            else:
                self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.QWEN25VL)
            self.gguf_writer.add_vision_use_silu(True)
            # find n_wa_pattern (window attention pattern)
            fullatt_block_indexes = hparams.get("fullatt_block_indexes")
            assert fullatt_block_indexes is not None, "fullatt_block_indexes is required for qwen2_5_vl"
            n_wa_pattern = fullatt_block_indexes[0] + 1
            # validate n_wa_pattern
            for i in range(1, len(fullatt_block_indexes)):
                if fullatt_block_indexes[i] - fullatt_block_indexes[i - 1] != n_wa_pattern:
                    raise ValueError(f"Invalid fullatt_block_indexes: {fullatt_block_indexes}")
            self.gguf_writer.add_vision_n_wa_pattern(n_wa_pattern)
        else:
            raise ValueError(f"Unknown QwenVL model type: {self.global_config['model_type']}")
        # default values below are taken from HF transformers code
        self.gguf_writer.add_vision_attention_layernorm_eps(self.global_config.get("rms_norm_eps", 1e-6))

    def tensor_force_quant(self, name, new_name, bid, n_dims):
        if ".position_embd." in new_name:
            return gguf.GGMLQuantizationType.F32
        return super().tensor_force_quant(name, new_name, bid, n_dims)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused
        if name.startswith("visual."):
            # process visual tensors
            # split QKV tensors if needed
            if ".qkv." in name:
                if data_torch.ndim == 2:  # weight
                    c3, _ = data_torch.shape
                else:  # bias
                    c3 = data_torch.shape[0]
                assert c3 % 3 == 0
                c = c3 // 3
                wq = data_torch[:c]
                wk = data_torch[c: c * 2]
                wv = data_torch[c * 2:]
                return [
                    (self.map_tensor_name(name.replace("qkv", "q")), wq),
                    (self.map_tensor_name(name.replace("qkv", "k")), wk),
                    (self.map_tensor_name(name.replace("qkv", "v")), wv),
                ]
            elif 'patch_embed.proj.weight' in name:
                # split Conv3D into Conv2Ds
                c1, c2, kt, kh, kw = data_torch.shape
                del c1, c2, kh, kw  # unused
                assert kt == 2, "The current implementation only supports temporal_patch_size of 2"
                return [
                    (gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.V_ENC_EMBD_PATCH] + ".weight", data_torch[:, :, 0, ...]),
                    (gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.V_ENC_EMBD_PATCH] + ".weight.1", data_torch[:, :, 1, ...]),
                ]
            else:
                return [(self.map_tensor_name(name), data_torch)]
        return []  # skip other tensors
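
    # Fused-QKV sketch (hypothetical embed dim 1280): the fused tensor stacks
    # Q, K and V row-wise, so c3 = 3 * 1280 = 3840 and the three equal slices
    # [:1280], [1280:2560] and [2560:] recover the per-projection weights;
    # the same slicing works for the 1-D bias.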


@ModelBase.register("Qwen2_5OmniModel")
class Qwen25OmniModel(Qwen2VLVisionModel):
    has_vision_encoder = True
    has_audio_encoder = True

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        assert self.hparams_audio is not None
        self.hparams_audio["hidden_size"] = self.hparams_audio["d_model"]
        self.hparams_audio["intermediate_size"] = self.hparams_audio["encoder_ffn_dim"]
        self.hparams_audio["num_attention_heads"] = self.hparams_audio["encoder_attention_heads"]

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        assert self.hparams_audio is not None
        self.gguf_writer.add_audio_num_mel_bins(self.hparams_audio["num_mel_bins"])
        self.gguf_writer.add_audio_attention_layernorm_eps(self.hparams_audio.get("layer_norm_eps", 1e-5))

    def get_vision_config(self) -> dict[str, Any] | None:
        return self.global_config["thinker_config"].get("vision_config")

    def get_audio_config(self) -> dict[str, Any] | None:
        return self.global_config["thinker_config"].get("audio_config")

    def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
        # SinusoidsPositionEmbedding
        assert self.hparams_audio is not None
        max_timescale = 10000
        length = 1500
        channels = self.hparams_audio["hidden_size"]
        log_timescale_increment = np.log(max_timescale) / (channels // 2 - 1)
        inv_timescales = torch.exp(-log_timescale_increment * torch.arange(channels // 2).float())
        scaled_time = torch.arange(length)[:, np.newaxis] * inv_timescales[np.newaxis, :]
        pos_embd = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=1).to(dtype=torch.float32)
        yield ("audio_tower.embed_positions.weight", pos_embd)
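
    # In other words, for position t and channel index c the table holds
    # sin(t / 10000^(c / (C/2 - 1))) in the first half of the channels and the
    # matching cos in the second half, giving a (1500, channels) fp32 tensor -
    # the standard Transformer sinusoidal position embedding.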

    def tensor_force_quant(self, name, new_name, bid, n_dims):
        if ".conv" in name and ".weight" in name:
            return gguf.GGMLQuantizationType.F16
        return super().tensor_force_quant(name, new_name, bid, n_dims)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        if name.startswith("thinker."):
            name = name.replace("thinker.", "")

        if name.startswith("audio_tower"):
            # process audio tensors
            if "conv1.bias" in name or "conv2.bias" in name:
                # transpose conv1 and conv2 bias
                data_torch = data_torch.unsqueeze(-1)
            if "audio_bos_eos_token" in name:
                # this tensor is left unused in transformers code
                # https://github.com/huggingface/transformers/blob/6e3063422c4b1c014aa60c32b9254fd2902f0f28/src/transformers/models/qwen2_5_omni/modular_qwen2_5_omni.py#L1809
                return []
            return [(self.map_tensor_name(name), data_torch)]

        return super().modify_tensors(data_torch, name, bid)


@ModelBase.register("InternVisionModel")
class InternVisionModel(MmprojModel):
    def set_gguf_parameters(self):
        assert self.hparams_vision is not None
        if isinstance(self.hparams_vision['image_size'], list):
            self.hparams_vision['image_size'] = self.hparams_vision['image_size'][0]
        if isinstance(self.hparams_vision['patch_size'], list):
            self.hparams_vision['patch_size'] = self.hparams_vision['patch_size'][0]
        super().set_gguf_parameters()
        hparams = self.hparams
        self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.INTERNVL)
        self.gguf_writer.add_vision_attention_layernorm_eps(hparams["layer_norm_eps"])
        # hidden_act
        if hparams["hidden_act"] == "silu":
            self.gguf_writer.add_vision_use_silu(True)
        elif hparams["hidden_act"] == "gelu":
            self.gguf_writer.add_vision_use_gelu(True)
        else:
            raise ValueError(f"Unsupported hidden_act: {hparams['hidden_act']}")
        # downsample_ratio
        downsample_ratio = self.global_config.get("downsample_ratio")
        assert downsample_ratio is not None
        self.gguf_writer.add_vision_projector_scale_factor(int(1.0 / downsample_ratio))

    def tensor_force_quant(self, name, new_name, bid, n_dims):
        if ".position_embd." in new_name:
            return gguf.GGMLQuantizationType.F32
        return super().tensor_force_quant(name, new_name, bid, n_dims)

    def _mapping_interns1_name(self, name):
        names_map = {
            "model.multi_modal_projector.layer_norm.bias": "mlp1.0.bias",
            "model.multi_modal_projector.layer_norm.weight": "mlp1.0.weight",
            "model.multi_modal_projector.linear_1.bias": "mlp1.1.bias",
            "model.multi_modal_projector.linear_1.weight": "mlp1.1.weight",
            "model.multi_modal_projector.linear_2.bias": "mlp1.3.bias",
            "model.multi_modal_projector.linear_2.weight": "mlp1.3.weight",
        }
        if name in names_map:
            name = names_map[name]
        return name

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused
        vision_prefix = ['vision_model', 'mlp', 'model.vision_tower', 'model.multi_modal_projector']
        # deal with the intern-s1 special case
        name = self._mapping_interns1_name(name)
        if any(name.startswith(prefix) for prefix in vision_prefix):
            # process visual tensors
            # correct name
            if name.startswith("vision_model"):
                name = "vision_tower." + name
            if (".ls" in name or ".lambda_" in name or "position_embedding" in name) and not name.endswith(".weight"):
                name += ".weight"
            # split QKV tensors if needed
            if ".qkv." in name:
                if data_torch.ndim == 2:  # weight
                    c3, _ = data_torch.shape
                else:  # bias
                    c3 = data_torch.shape[0]
                assert c3 % 3 == 0
                c = c3 // 3
                wq = data_torch[:c]
                wk = data_torch[c: c * 2]
                wv = data_torch[c * 2:]
                return [
                    (self.map_tensor_name(name.replace("attn.qkv", "self_attn.q_proj")), wq),
                    (self.map_tensor_name(name.replace("attn.qkv", "self_attn.k_proj")), wk),
                    (self.map_tensor_name(name.replace("attn.qkv", "self_attn.v_proj")), wv),
                ]
            return [(self.map_tensor_name(name), data_torch)]
        return []  # skip other tensors


@ModelBase.register("WavTokenizerDec")
class WavTokenizerDecModel(TextModel):
    model_arch = gguf.MODEL_ARCH.WAVTOKENIZER_DEC

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        if name.endswith(("codebook.cluster_size", "codebook.embed_avg", "codebook.inited")):
            logger.debug(f"Skipping {name!r}")
            return []

        logger.info(f"{self.map_tensor_name(name)} -> {data_torch.shape}")

        return [(self.map_tensor_name(name), data_torch)]

    def set_vocab(self):
        self._set_vocab_none()

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_vocab_size         (self.hparams["vocab_size"])
        self.gguf_writer.add_features_length    (self.hparams["n_embd_features"])
        self.gguf_writer.add_feed_forward_length(self.hparams["n_ff"])
        self.gguf_writer.add_group_norm_eps     (self.hparams["group_norm_epsilon"])
        self.gguf_writer.add_group_norm_groups  (self.hparams["group_norm_groups"])

        self.gguf_writer.add_posnet_embedding_length(self.hparams["posnet"]["n_embd"])
        self.gguf_writer.add_posnet_block_count     (self.hparams["posnet"]["n_layer"])

        self.gguf_writer.add_convnext_embedding_length(self.hparams["convnext"]["n_embd"])
        self.gguf_writer.add_convnext_block_count     (self.hparams["convnext"]["n_layer"])

        self.gguf_writer.add_causal_attention(False)


@ModelBase.register("Qwen2MoeForCausalLM")
class Qwen2MoeModel(TextModel):
    model_arch = gguf.MODEL_ARCH.QWEN2MOE

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        if (n_experts := self.hparams.get("num_experts")) is not None:
            self.gguf_writer.add_expert_count(n_experts)
        if (moe_intermediate_size := self.hparams.get("moe_intermediate_size")) is not None:
            self.gguf_writer.add_expert_feed_forward_length(moe_intermediate_size)
            logger.info(f"gguf: expert feed forward length = {moe_intermediate_size}")
        if (shared_expert_intermediate_size := self.hparams.get('shared_expert_intermediate_size')) is not None:
            self.gguf_writer.add_expert_shared_feed_forward_length(shared_expert_intermediate_size)
            logger.info(f"gguf: expert shared feed forward length = {shared_expert_intermediate_size}")
        # YaRN is not enabled by default
        # To enable it, please refer to this guide: https://huggingface.co/Qwen/Qwen3-30B-A3B#processing-long-texts
        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
            self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"])

    _experts: list[dict[str, Tensor]] | None = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # process the experts separately
        name = name.replace("language_model.", "")  # InternVL
        if name.startswith("mlp") or name.startswith("vision_model") or name.startswith("model.vision_tower") or name.startswith("model.multi_modal_projector"):
            # skip visual tensors
            return []
        if name.find("experts") != -1:
            n_experts = self.hparams["num_experts"]
            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for w_name in ["down_proj", "gate_proj", "up_proj"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)
                    merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"
                    new_name = self.map_tensor_name(merged_name)
                    tensors.append((new_name, data_torch))

                return tensors
            else:
                return []
        return [(self.map_tensor_name(name), data_torch)]
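
    # Merge sketch with hypothetical sizes: for n_experts=60 and per-expert
    # gate_proj weights of shape (moe_ffn, n_embd), torch.stack(datas, dim=0)
    # produces a single (60, moe_ffn, n_embd) tensor, so the GGUF file stores
    # one stacked 3-D expert tensor per projection instead of 60 2-D weights.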

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")


@ModelBase.register("Qwen3ForCausalLM")
class Qwen3Model(Qwen2Model):
    model_arch = gguf.MODEL_ARCH.QWEN3

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        hparams = ModelBase.load_hparams(self.dir_model, is_mistral_format=False)
        self.origin_hf_arch = hparams.get('architectures', [None])[0]

    def set_vocab(self):
        # deal with intern-s1-mini
        if self.origin_hf_arch == 'InternS1ForConditionalGeneration':
            self._set_vocab_interns1()
            return
        super().set_vocab()


@ModelBase.register("Qwen3MoeForCausalLM")
class Qwen3MoeModel(Qwen2MoeModel):
    model_arch = gguf.MODEL_ARCH.QWEN3MOE

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        hparams = ModelBase.load_hparams(self.dir_model, is_mistral_format=False)
        self.origin_hf_arch = hparams.get('architectures', [None])[0]

    def set_vocab(self):
        # deal with intern-s1
        if self.origin_hf_arch == 'InternS1ForConditionalGeneration':
            self._set_vocab_interns1()
            return
        super().set_vocab()


@ModelBase.register("Qwen3NextForCausalLM")
class Qwen3NextModel(Qwen3MoeModel):
    model_arch = gguf.MODEL_ARCH.QWEN3NEXT

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_ssm_conv_kernel(self.find_hparam(["linear_conv_kernel_dim"]))
        self.gguf_writer.add_ssm_state_size(self.find_hparam(["linear_key_head_dim"]))
        self.gguf_writer.add_ssm_group_count(self.find_hparam(["linear_num_key_heads"]))
        self.gguf_writer.add_ssm_time_step_rank(self.find_hparam(["linear_num_value_heads"]))
        self.gguf_writer.add_ssm_inner_size(self.find_hparam(['linear_value_head_dim']) * self.find_hparam(['linear_num_value_heads']))

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        if name.startswith("mtp"):
            return []  # ignore MTP layers for now

        if name.endswith(".A_log"):
            data_torch = -torch.exp(data_torch)
        elif name.endswith(".dt_bias"):
            name = name.rpartition(".dt_bias")[0] + ".dt_proj.bias"
        elif "conv1d" in name:
            data_torch = data_torch.squeeze()
        elif "q_proj.weight" in name:
            q_proj, gate = data_torch.chunk(2, dim=0)
            yield (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_GATE, bid), gate)
            data_torch = q_proj

        yield from Qwen2MoeModel.modify_tensors(self, data_torch, name, bid)
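
    # Two transforms worth noting, sketched under assumed shapes: the SSM decay
    # is stored as A_log, so A = -exp(A_log) recovers the negative decay rates;
    # and the fused q_proj stacks query rows on top of gate rows, so
    # chunk(2, dim=0) splits it into two equal halves, query first, gate second.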


@ModelBase.register("GPT2LMHeadModel")
class GPT2Model(TextModel):
    model_arch = gguf.MODEL_ARCH.GPT2

    def set_gguf_parameters(self):
        self.gguf_writer.add_block_count(self.hparams["n_layer"])
        self.gguf_writer.add_context_length(self.hparams["n_ctx"])
        self.gguf_writer.add_embedding_length(self.hparams["n_embd"])
        self.gguf_writer.add_feed_forward_length(4 * self.hparams["n_embd"])
        self.gguf_writer.add_head_count(self.hparams["n_head"])
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        tensors: list[tuple[str, Tensor]] = []

        # we don't need these
        if name.endswith((".attn.bias", ".attn.masked_bias")):
            return tensors

        if name.endswith((".c_attn.weight", ".c_proj.weight", ".c_fc.weight")):
            data_torch = data_torch.transpose(1, 0)

        new_name = self.map_tensor_name(name)

        tensors.append((new_name, data_torch))

        return tensors
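
    # The transpose is needed because GPT-2 checkpoints use HF's Conv1D
    # modules, which store weights as (n_in, n_out); e.g. a c_attn weight of
    # shape (n_embd, 3 * n_embd) becomes the (3 * n_embd, n_embd) layout that
    # a regular linear projection (and GGUF) expects.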


@ModelBase.register("PhiForCausalLM")
class Phi2Model(TextModel):
    model_arch = gguf.MODEL_ARCH.PHI2

    def set_gguf_parameters(self):
        block_count = self.find_hparam(["num_hidden_layers", "n_layer"])

        rot_pct = self.find_hparam(["partial_rotary_factor"])
        n_embd = self.find_hparam(["hidden_size", "n_embd"])
        n_head = self.find_hparam(["num_attention_heads", "n_head"])

        self.gguf_writer.add_context_length(self.find_hparam(["n_positions", "max_position_embeddings"]))

        self.gguf_writer.add_embedding_length(n_embd)
        self.gguf_writer.add_feed_forward_length(4 * n_embd)
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_head_count(n_head)
        self.gguf_writer.add_head_count_kv(n_head)
        self.gguf_writer.add_layer_norm_eps(self.find_hparam(["layer_norm_epsilon", "layer_norm_eps"]))
        self.gguf_writer.add_rope_dimension_count(int(rot_pct * n_embd) // n_head)
        self.gguf_writer.add_file_type(self.ftype)
        self.gguf_writer.add_add_bos_token(False)
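
    # Rotary-dimension arithmetic, sketched with Phi-2-like values
    # (partial_rotary_factor=0.4, hidden_size=2560, 32 heads):
    # int(0.4 * 2560) // 32 = 1024 // 32 = 32, i.e. only the first 32 of the
    # 80 dimensions per head are rotated.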


@ModelBase.register("Phi3ForCausalLM")
class Phi3MiniModel(TextModel):
    model_arch = gguf.MODEL_ARCH.PHI3

    def set_vocab(self):
        # Phi-4 models use GPT2Tokenizer
        tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
        if tokenizer_config_file.is_file():
            with open(tokenizer_config_file, "r", encoding="utf-8") as f:
                tokenizer_config_json = json.load(f)
                tokenizer_class = tokenizer_config_json['tokenizer_class']
                if tokenizer_class == 'GPT2Tokenizer':
                    return self._set_vocab_gpt2()

        from sentencepiece import SentencePieceProcessor

        tokenizer_path = self.dir_model / 'tokenizer.model'

        if not tokenizer_path.is_file():
            raise ValueError(f'Error: Missing {tokenizer_path}')

        tokenizer = SentencePieceProcessor()
        tokenizer.LoadFromFile(str(tokenizer_path))

        vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())

        tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
        scores: list[float] = [-10000.0] * vocab_size
        toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size

        for token_id in range(tokenizer.vocab_size()):
            piece = tokenizer.IdToPiece(token_id)
            text = piece.encode("utf-8")
            score = tokenizer.GetScore(token_id)

            toktype = SentencePieceTokenTypes.NORMAL
            if tokenizer.IsUnknown(token_id):
                toktype = SentencePieceTokenTypes.UNKNOWN
            elif tokenizer.IsControl(token_id):
                toktype = SentencePieceTokenTypes.CONTROL
            elif tokenizer.IsUnused(token_id):
                toktype = SentencePieceTokenTypes.UNUSED
            elif tokenizer.IsByte(token_id):
                toktype = SentencePieceTokenTypes.BYTE

            tokens[token_id] = text
            scores[token_id] = score
            toktypes[token_id] = toktype

        added_tokens_file = self.dir_model / 'added_tokens.json'
        if added_tokens_file.is_file():
            with open(added_tokens_file, "r", encoding="utf-8") as f:
                added_tokens_json = json.load(f)

                for key in added_tokens_json:
                    token_id = added_tokens_json[key]
                    if token_id >= vocab_size:
                        logger.debug(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
                        continue

                    tokens[token_id] = key.encode("utf-8")
                    scores[token_id] = -1000.0
                    toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED

        tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
        if tokenizer_config_file.is_file():
            with open(tokenizer_config_file, "r", encoding="utf-8") as f:
                tokenizer_config_json = json.load(f)
                added_tokens_decoder = tokenizer_config_json.get("added_tokens_decoder", {})
                for token_id, token_data in added_tokens_decoder.items():
                    token_id = int(token_id)
                    token = token_data["content"].encode("utf-8")
                    if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
                        if tokens[token_id] != token:
                            logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token.decode("utf-8")!r}')
                    tokens[token_id] = token
                    scores[token_id] = -1000.0
                    toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
                    if token_data.get("special"):
                        toktypes[token_id] = SentencePieceTokenTypes.CONTROL

        tokenizer_file = self.dir_model / 'tokenizer.json'
        if tokenizer_file.is_file():
            with open(tokenizer_file, "r", encoding="utf-8") as f:
                tokenizer_json = json.load(f)
                added_tokens = tokenizer_json.get("added_tokens", [])
                for token_data in added_tokens:
                    token_id = int(token_data["id"])
                    token = token_data["content"].encode("utf-8")
                    if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
                        if tokens[token_id] != token:
                            logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token.decode("utf-8")!r}')
                    tokens[token_id] = token
                    scores[token_id] = -1000.0
                    toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
                    if token_data.get("special"):
                        toktypes[token_id] = SentencePieceTokenTypes.CONTROL

        self.gguf_writer.add_tokenizer_model("llama")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        block_count = self.find_hparam(["num_hidden_layers", "n_layer"])

        n_embd = self.find_hparam(["hidden_size", "n_embd"])
        n_head = self.find_hparam(["num_attention_heads", "n_head"])
        n_head_kv = self.find_hparam(["num_key_value_heads", "n_head_kv"])
        rms_eps = self.find_hparam(["rms_norm_eps"])
        max_pos_embds = self.find_hparam(["n_positions", "max_position_embeddings"])
        orig_max_pos_embds = self.find_hparam(["original_max_position_embeddings"])
        rot_pct = self.hparams.get("partial_rotary_factor", 1.0)
        rope_dims = int(rot_pct * n_embd) // n_head

        self.gguf_writer.add_context_length(max_pos_embds)
        self.gguf_writer.add_rope_scaling_orig_ctx_len(orig_max_pos_embds)
        self.gguf_writer.add_embedding_length(n_embd)
        self.gguf_writer.add_feed_forward_length(self.find_hparam(["intermediate_size"]))
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_head_count(n_head)
        self.gguf_writer.add_head_count_kv(n_head_kv)
        self.gguf_writer.add_layer_norm_rms_eps(rms_eps)
        self.gguf_writer.add_rope_dimension_count(rope_dims)
        self.gguf_writer.add_rope_freq_base(self.find_hparam(["rope_theta"]))
        self.gguf_writer.add_file_type(self.ftype)
        sliding_window = self.hparams.get("sliding_window")
        # use zero value of sliding_window to distinguish Phi-4 from other PHI3 models
        if sliding_window is None:
            sliding_window = 0
        self.gguf_writer.add_sliding_window(sliding_window)

    def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
        n_embd = self.find_hparam(["hidden_size", "n_embd"])
        n_head = self.find_hparam(["num_attention_heads", "n_head"])
        max_pos_embds = self.find_hparam(["n_positions", "max_position_embeddings"])
        orig_max_pos_embds = self.find_hparam(["original_max_position_embeddings"])
        rot_pct = self.hparams.get("partial_rotary_factor", 1.0)
        rope_dims = int(rot_pct * n_embd) // n_head

        # write rope scaling for long context (128k) model
        rope_scaling = self.find_hparam(['rope_scaling'], True)
        if rope_scaling is None:
            return

        scale = max_pos_embds / orig_max_pos_embds

        rope_scaling_type = rope_scaling.get('rope_type', rope_scaling.get('type', '')).lower()
        if len(rope_scaling_type) == 0:
            raise KeyError('Missing the required key rope_scaling.type')

        if rope_scaling_type == 'su' or rope_scaling_type == 'longrope':
            attn_factor = math.sqrt(1 + math.log(scale) / math.log(orig_max_pos_embds)) if scale > 1.0 else 1.0
        elif rope_scaling_type == 'yarn':
            attn_factor = 0.1 * math.log(scale) + 1.0 if scale > 1.0 else 1.0
        else:
            raise NotImplementedError(f'The rope scaling type {rope_scaling_type} is not supported yet')

        self.gguf_writer.add_rope_scaling_attn_factors(attn_factor)

        long_factors = rope_scaling.get('long_factor', None)
        short_factors = rope_scaling.get('short_factor', None)

        if long_factors is None or short_factors is None:
            raise KeyError('Missing the required key rope_scaling.long_factor or rope_scaling.short_factor')

        if len(long_factors) != len(short_factors) or len(long_factors) != rope_dims / 2:
            raise ValueError(f'The length of rope long and short factors must be {rope_dims / 2}. long_factors = {len(long_factors)}, short_factors = {len(short_factors)}.')

        yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FACTORS_LONG), torch.tensor(long_factors, dtype=torch.float32))
        yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FACTORS_SHORT), torch.tensor(short_factors, dtype=torch.float32))
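
    # Worked attn_factor example (hypothetical but typical long-context
    # numbers): with max_pos_embds = 131072 and orig_max_pos_embds = 4096,
    # scale = 32, so for 'longrope'
    #   attn_factor = sqrt(1 + ln(32) / ln(4096)) ~= sqrt(1.4167) ~= 1.19
    # and the long/short factor lists must each hold rope_dims / 2 entries.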


@ModelBase.register("PhiMoEForCausalLM")
class PhiMoeModel(Phi3MiniModel):
    model_arch = gguf.MODEL_ARCH.PHIMOE

    _experts: list[dict[str, Tensor]] | None = None

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_expert_used_count(self.hparams["num_experts_per_tok"])
        self.gguf_writer.add_expert_count(self.hparams["num_local_experts"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # process the experts separately
        if name.find("block_sparse_moe.experts") != -1:
            n_experts = self.hparams["num_local_experts"]
            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for w_name in ["w1", "w2", "w3"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.{w_name}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)
                    merged_name = f"model.layers.{bid}.block_sparse_moe.experts.{w_name}.weight"
                    new_name = self.map_tensor_name(merged_name)
                    tensors.append((new_name, data_torch))

                return tensors
            else:
                return []
        return [(self.map_tensor_name(name), data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")


@ModelBase.register("PlamoForCausalLM")
class PlamoModel(TextModel):
    model_arch = gguf.MODEL_ARCH.PLAMO

    def set_vocab(self):
        self._set_vocab_sentencepiece()

    def set_gguf_parameters(self):
        hparams = self.hparams
        block_count = hparams["num_hidden_layers"]

        self.gguf_writer.add_context_length(4096)  # not in config.json
        self.gguf_writer.add_embedding_length(hparams["hidden_size"])
        self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_head_count(hparams["num_attention_heads"])
        self.gguf_writer.add_head_count_kv(5)  # hparams["num_key_value_heads"] is wrong
        self.gguf_writer.add_layer_norm_rms_eps(hparams["rms_norm_eps"])
        self.gguf_writer.add_file_type(self.ftype)

    def shuffle_attn_q_weight(self, data_torch):
        assert data_torch.size() == (5120, 5120)
        data_torch = data_torch.reshape(8, 5, 128, 5120)
        data_torch = torch.permute(data_torch, (1, 0, 2, 3))
        data_torch = torch.reshape(data_torch, (5120, 5120))
        return data_torch

    def shuffle_attn_output_weight(self, data_torch):
        assert data_torch.size() == (5120, 5120)
        data_torch = data_torch.reshape(5120, 8, 5, 128)
        data_torch = torch.permute(data_torch, (0, 2, 1, 3))
        data_torch = torch.reshape(data_torch, (5120, 5120))
        return data_torch

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        new_name = self.map_tensor_name(name)

        # shuffle for broadcasting of gqa in ggml_mul_mat
        if new_name.endswith("attn_q.weight"):
            data_torch = self.shuffle_attn_q_weight(data_torch)
        elif new_name.endswith("attn_output.weight"):
            data_torch = self.shuffle_attn_output_weight(data_torch)

        return [(new_name, data_torch)]


@ModelBase.register("Plamo2ForCausalLM", "PLaMo2ForCausalLM")
class Plamo2Model(TextModel):
    model_arch = gguf.MODEL_ARCH.PLAMO2

    def set_vocab(self):
        # PLaMo 2 uses a custom tokenizer with a .jsonl file,
        # so we need to handle this specially
        tokenizer_jsonl_path = self.dir_model / "tokenizer.jsonl"
        tokenizer_config_path = self.dir_model / "tokenizer_config.json"

        if not tokenizer_jsonl_path.is_file():
            raise FileNotFoundError(f"PLaMo 2 tokenizer file not found: {tokenizer_jsonl_path}")

        # Load tokenizer config
        with open(tokenizer_config_path, 'r', encoding='utf-8') as f:
            tokenizer_config = json.load(f)

        # Load tokens from the JSONL file (each line is a JSON list)
        tokens = []
        scores = []
        toktypes = []

        with open(tokenizer_jsonl_path, 'r', encoding='utf-8') as f:
            for line in f:
                if line.strip():
                    token_data = json.loads(line)
                    # Format: [token, score, type, ?, ?, ?, ?]
                    token = token_data[0].encode("utf-8")
                    score = float(token_data[1])
                    token_type_str = token_data[2] if len(token_data) > 2 else "NORMAL"

                    tokens.append(token)
                    scores.append(score)

                    # Map token type strings to GGUF token types
                    if token_type_str == "UNKNOWN":
                        toktypes.append(gguf.TokenType.UNKNOWN)
                    elif token_type_str == "CONTROL":
                        toktypes.append(gguf.TokenType.CONTROL)
                    elif token_type_str == "BYTE":
                        toktypes.append(gguf.TokenType.BYTE)
                    else:
                        # Check for PLaMo-2 special tokens
                        token_str = token_data[0]
                        if token_str.startswith("<|plamo:") and token_str.endswith("|>"):
                            toktypes.append(gguf.TokenType.CONTROL)
                        else:
                            toktypes.append(gguf.TokenType.NORMAL)

        vocab_size = self.hparams["vocab_size"]
        if vocab_size > len(tokens):
            pad_count = vocab_size - len(tokens)
            logger.debug(f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]")
            for i in range(1, pad_count + 1):
                tokens.append(bytes(f"[PAD{i}]", encoding="utf-8"))
                scores.append(-1000.0)
                toktypes.append(gguf.TokenType.UNUSED)

        # Use the "plamo2" tokenizer type for PLaMo-2's custom Aho-Corasick tokenizer
        self.gguf_writer.add_tokenizer_model("plamo2")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)

        # Add special tokens from config
        if "bos_token" in tokenizer_config and tokenizer_config["bos_token"] is not None:
            token_id = tokens.index(tokenizer_config["bos_token"].encode("utf-8"))
            self.gguf_writer.add_bos_token_id(token_id)
        if "eos_token" in tokenizer_config and tokenizer_config["eos_token"] is not None:
            token_id = tokens.index(tokenizer_config["eos_token"].encode("utf-8"))
            self.gguf_writer.add_eos_token_id(token_id)
        if "pad_token" in tokenizer_config and tokenizer_config["pad_token"] is not None:
            token_id = tokens.index(tokenizer_config["pad_token"].encode("utf-8"))
            self.gguf_writer.add_pad_token_id(token_id)
        if "sep_token" in tokenizer_config and tokenizer_config["sep_token"] is not None:
            token_id = tokens.index(tokenizer_config["sep_token"].encode("utf-8"))
            self.gguf_writer.add_sep_token_id(token_id)
        if "unk_token" in tokenizer_config and tokenizer_config["unk_token"] is not None:
            token_id = tokens.index(tokenizer_config["unk_token"].encode("utf-8"))
            self.gguf_writer.add_unk_token_id(token_id)

        # Add <|plamo:op|> as EOT to ensure appropriate end of generation
        self.gguf_writer.add_eot_token_id(4)

        self.gguf_writer.add_add_space_prefix(False)

    def set_gguf_parameters(self):
        hparams = self.hparams
        block_count = hparams["num_hidden_layers"]
        self.gguf_writer.add_vocab_size(self.hparams["vocab_size"])

        # Which layers are Mamba layers
        # PLaMo 2 uses mamba_step to indicate the pattern (e.g. 2 means every other layer)
        # This logic matches modeling_plamo.py's is_mamba function
        mamba_step = hparams.get("mamba_step", 2)
        mamba_enabled = hparams.get("mamba_enabled", True)
        mamba_layers = []

        if mamba_enabled:
            for i in range(block_count):
                if block_count <= (mamba_step // 2):
                    # use attention in last layer
                    is_mamba = (i != block_count - 1)
                else:
                    is_mamba = (i % mamba_step) != (mamba_step // 2)
                if is_mamba:
                    mamba_layers.append(0)
                else:
                    mamba_layers.append(hparams.get("num_key_value_heads", 4))

        if mamba_layers:
            self.gguf_writer.add_head_count_kv(mamba_layers)
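
        # Pattern sketch (hypothetical block_count=8, mamba_step=2): layers
        # whose index i satisfies i % 2 == 1 are attention layers, so the
        # per-layer KV head counts become [0, 4, 0, 4, 0, 4, 0, 4] - a 0
        # marks a Mamba layer, a nonzero entry an attention layer.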

        self.gguf_writer.add_context_length(hparams.get("max_position_embeddings", 2048))
        self.gguf_writer.add_embedding_length(hparams.get("hidden_size", 4096))
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_head_count(hparams.get("num_attention_heads", 32))
        self.gguf_writer.add_layer_norm_rms_eps(hparams.get("rms_norm_eps", 1e-06))
        self.gguf_writer.add_rope_freq_base(hparams.get("rope_theta", 10000))

        # Mamba parameters
        self.gguf_writer.add_ssm_state_size(hparams.get("mamba_d_state", 64))
        self.gguf_writer.add_ssm_conv_kernel(hparams.get("mamba_d_conv", 4))
        self.gguf_writer.add_ssm_time_step_rank(hparams.get("mamba_num_heads", 64))
        intermediate_size = hparams.get("mamba_num_heads", 64) * hparams.get("hidden_size_per_head", 128)
        self.gguf_writer.add_ssm_inner_size(intermediate_size)
        self.gguf_writer.add_ssm_group_count(0)

        # MLP feed forward parameters (for attention layers)
        self.gguf_writer.add_feed_forward_length(hparams.get("intermediate_size", 13312))
        self.gguf_writer.add_file_type(self.ftype)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        if name.endswith(".A_log"):
            data_torch = -torch.exp(data_torch)
        elif name.endswith(".dt_bias"):
            name = name.rpartition(".dt_bias")[0] + ".dt_proj.bias"
        elif name.endswith(".dt_norm_weight"):
            name = name.rpartition(".dt_norm_weight")[0] + ".dt_norm.weight"
        elif name.endswith(".B_norm_weight"):
            name = name.rpartition(".B_norm_weight")[0] + ".B_norm.weight"
        elif name.endswith(".C_norm_weight"):
            name = name.rpartition(".C_norm_weight")[0] + ".C_norm.weight"
        elif name.endswith(".k_weight"):
            name = name.rpartition(".k_weight")[0] + ".k.weight"
        elif name.endswith(".q_weight"):
            name = name.rpartition(".q_weight")[0] + ".q.weight"
        elif name.endswith(".conv1d.weight"):
            data_torch = torch.squeeze(data_torch)  # drop the singleton middle dim: (d, 1, k) -> (d, k)
            assert data_torch.ndim == 2
        elif name.endswith(".pre_mixer_norm.weight"):
            data_torch += 1.0
        elif name.endswith(".post_mixer_norm.weight"):
            data_torch += 1.0 / 5
        elif name.endswith(".pre_mlp_norm.weight"):
            data_torch += 1.0
        elif name.endswith(".post_mlp_norm.weight"):
            data_torch += 1.0 / (5**1.5)
        elif name.endswith(".norm.weight"):
            data_torch += 1.0

        new_name = self.map_tensor_name(name)

        return [(new_name, data_torch)]
  3520. @ModelBase.register("CodeShellForCausalLM")
  3521. class CodeShellModel(TextModel):
  3522. model_arch = gguf.MODEL_ARCH.CODESHELL
  3523. def set_gguf_parameters(self):
  3524. block_count = self.hparams["n_layer"]
  3525. self.gguf_writer.add_context_length(self.hparams["n_positions"])
  3526. self.gguf_writer.add_embedding_length(self.hparams["n_embd"])
  3527. self.gguf_writer.add_feed_forward_length(4 * self.hparams["n_embd"])
  3528. self.gguf_writer.add_block_count(block_count)
  3529. self.gguf_writer.add_head_count(self.hparams["n_head"])
  3530. self.gguf_writer.add_head_count_kv(self.hparams["num_query_groups"])
  3531. self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
  3532. self.gguf_writer.add_file_type(self.ftype)
  3533. self.gguf_writer.add_rope_freq_base(10000.0)
  3534. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
  3535. self.gguf_writer.add_rope_scaling_factor(1.0)
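
    # Tracks whether token_embd.weight has been emitted yet; used in
    # modify_tensors below to detect tied output/embedding weights when the
    # checkpoint ships only output.weight.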
    _has_tok_embd = False

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        output_name = self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT)
        tok_embd_name = self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD)

        new_name = self.map_tensor_name(name)

        # assuming token_embd.weight is seen before output.weight
        if not self._has_tok_embd and new_name == self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT):
            # even though the tensor file(s) do not contain the word embeddings, they are still in the weight map
            if self.tensor_names and "transformer.wte.weight" in self.tensor_names:
                logger.debug(f"{tok_embd_name} not found before {output_name}, assuming they are tied")
                self.tensor_names.remove("transformer.wte.weight")
        elif new_name == tok_embd_name:
            self._has_tok_embd = True

        return [(new_name, data_torch)]


@ModelBase.register("InternLM2ForCausalLM")
class InternLM2Model(TextModel):
    model_arch = gguf.MODEL_ARCH.INTERNLM2

    def set_vocab(self):
        # (TODO): Is there a better way?
        # Copied from _set_vocab_sentencepiece; the only difference is that we treat the character
        # \x00 specially and convert it into an emoji character to prevent it from being mistakenly
        # recognized as an empty string in C++.
        from sentencepiece import SentencePieceProcessor
        from sentencepiece import sentencepiece_model_pb2 as model

        tokenizer_path = self.dir_model / 'tokenizer.model'

        tokens: list[bytes] = []
        scores: list[float] = []
        toktypes: list[int] = []

        if not tokenizer_path.is_file():
            logger.error(f'Error: Missing {tokenizer_path}')
            sys.exit(1)

        sentencepiece_model = model.ModelProto()  # pyright: ignore[reportAttributeAccessIssue]
        sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read())
        add_prefix = sentencepiece_model.normalizer_spec.add_dummy_prefix

        tokenizer = SentencePieceProcessor()
        tokenizer.LoadFromFile(str(tokenizer_path))

        vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())

        for token_id in range(vocab_size):
            piece = tokenizer.IdToPiece(token_id)
            text = piece.encode("utf-8")
            score = tokenizer.GetScore(token_id)
            if text == b"\x00":
                # (TODO): fixme
                # Hack here and replace the \x00 characters.
                logger.warning(f"InternLM2 converts token '{text}' to '🐉'!")
                text = "🐉".encode("utf-8")

            toktype = SentencePieceTokenTypes.NORMAL
            if tokenizer.IsUnknown(token_id):
                toktype = SentencePieceTokenTypes.UNKNOWN
            elif tokenizer.IsControl(token_id):
                toktype = SentencePieceTokenTypes.CONTROL
            elif tokenizer.IsUnused(token_id):
                toktype = SentencePieceTokenTypes.UNUSED
            elif tokenizer.IsByte(token_id):
                toktype = SentencePieceTokenTypes.BYTE
            # take care of unused raw tokens
            if piece.startswith('[UNUSED'):
                toktype = SentencePieceTokenTypes.UNUSED

            tokens.append(text)
            scores.append(score)
            toktypes.append(toktype)

        added_tokens_file = self.dir_model / 'added_tokens.json'
        if added_tokens_file.is_file():
            with open(added_tokens_file, "r", encoding="utf-8") as f:
                added_tokens_json = json.load(f)

            for key in added_tokens_json:
                tokens.append(key.encode("utf-8"))
                scores.append(-1000.0)
                toktypes.append(SentencePieceTokenTypes.USER_DEFINED)
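
        # The chat variants end turns with '<|im_end|>' rather than the
        # sentencepiece EOS; the two passes below look it up in
        # tokenizer_config.json and tokenizer.json so the eos id can be
        # remapped afterwards.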
        chat_eos_token = '<|im_end|>'
        chat_eos_token_id = None

        tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
        if tokenizer_config_file.is_file():
            with open(tokenizer_config_file, "r", encoding="utf-8") as f:
                tokenizer_config_json = json.load(f)
            added_tokens_decoder = tokenizer_config_json.get("added_tokens_decoder", {})
            for token_id, token_data in added_tokens_decoder.items():
                token_id = int(token_id)
                token = token_data["content"]
                if token == chat_eos_token:
                    chat_eos_token_id = token_id
                token = token.encode("utf-8")
                if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
                    if tokens[token_id] != token:
                        logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token.decode("utf-8")!r}')
                tokens[token_id] = token
                scores[token_id] = -1000.0
                toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
                if token_data.get("special"):
                    toktypes[token_id] = SentencePieceTokenTypes.CONTROL

        tokenizer_file = self.dir_model / 'tokenizer.json'
        if tokenizer_file.is_file():
            with open(tokenizer_file, "r", encoding="utf-8") as f:
                tokenizer_json = json.load(f)
            added_tokens = tokenizer_json.get("added_tokens", [])
            for token_data in added_tokens:
                token_id = int(token_data["id"])
                token = token_data["content"]
                if token == chat_eos_token:
                    chat_eos_token_id = token_id
                token = token.encode("utf-8")
                if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
                    if tokens[token_id] != token:
                        logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token.decode("utf-8")!r}')
                tokens[token_id] = token
                scores[token_id] = -1000.0
                toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
                if token_data.get("special"):
                    toktypes[token_id] = SentencePieceTokenTypes.CONTROL

        self.gguf_writer.add_tokenizer_model("llama")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)
        self.gguf_writer.add_add_space_prefix(add_prefix)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        old_eos = special_vocab.special_token_ids["eos"]
        if chat_eos_token_id is not None:
            # For the chat model, we replace the eos with '<|im_end|>'.
            # TODO: this is a hack, should be fixed
            # https://github.com/ggml-org/llama.cpp/pull/6745#issuecomment-2067687048
            special_vocab.special_token_ids["eos"] = chat_eos_token_id
            logger.warning(f"Replace eos:{old_eos} with a special token:{chat_eos_token_id}"
                           " in chat mode so that the conversation can end normally.")

        special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"])
        self.gguf_writer.add_block_count(self.hparams["num_hidden_layers"])
        self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
        self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
        self.gguf_writer.add_rope_freq_base(self.hparams["rope_theta"])
        self.gguf_writer.add_head_count(self.hparams["num_attention_heads"])
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
        self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"])
        self.gguf_writer.add_file_type(self.ftype)

        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "linear" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        num_heads = self.hparams["num_attention_heads"]
        num_kv_heads = self.hparams["num_key_value_heads"]
        n_embd = self.hparams["hidden_size"]
        q_per_kv = num_heads // num_kv_heads
        head_dim = n_embd // num_heads
        num_groups = num_heads // q_per_kv

        name = name.replace("language_model.", "")  # InternVL
        if name.startswith("mlp") or name.startswith("vision_model"):
            # skip visual tensors
            return []
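
        # wqkv packs, per KV group, q_per_kv query heads followed by one key
        # and one value head; the reshape/slice below recovers separate
        # Q/K/V tensors in the layout llama.cpp expects.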
        if bid is not None and f"model.layers.{bid}.attention.wqkv" in name:
            qkv = data_torch

            qkv = qkv.reshape((num_groups, q_per_kv + 2, head_dim, n_embd))
            q, k, v = qkv[:, : q_per_kv], qkv[:, -2], qkv[:, -1]

            # The model weights of q and k require an additional reshape.
            q = LlamaModel.permute(q.reshape((-1, q.shape[-1])), num_heads, num_heads)
            k = LlamaModel.permute(k.reshape((-1, k.shape[-1])), num_heads, num_kv_heads)
            v = v.reshape((-1, v.shape[-1]))

            return [
                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid), q),
                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid), k),
                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid), v),
            ]
        else:
            return [(self.map_tensor_name(name), data_torch)]


@ModelBase.register("InternLM3ForCausalLM")
class InternLM3Model(TextModel):
    model_arch = gguf.MODEL_ARCH.LLAMA

    def set_vocab(self):
        tokens, scores, toktypes = self._create_vocab_sentencepiece()
        self.gguf_writer.add_tokenizer_model("llama")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))

        tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
        if tokenizer_config_file.is_file():
            with open(tokenizer_config_file, "r", encoding="utf-8") as f:
                tokenizer_config_json = json.load(f)
            if "add_prefix_space" in tokenizer_config_json:
                self.gguf_writer.add_add_space_prefix(tokenizer_config_json["add_prefix_space"])

            if "added_tokens_decoder" in tokenizer_config_json:
                for token_id, token_data in tokenizer_config_json["added_tokens_decoder"].items():
                    if token_data.get("special"):
                        token_id = int(token_id)
                        token = token_data["content"]
                        special_vocab._set_special_token(token, token_id)
                        # update eos token
                        if token == '<|im_end|>' and "eos" in special_vocab.special_token_ids:
                            special_vocab.special_token_ids["eos"] = token_id

        special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        hparams = self.hparams
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])

        if (rope_dim := hparams.get("head_dim")) is None:
            rope_dim = hparams["hidden_size"] // hparams["num_attention_heads"]
        self.gguf_writer.add_rope_dimension_count(rope_dim)

        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "linear" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        n_head = self.hparams["num_attention_heads"]
        n_kv_head = self.hparams.get("num_key_value_heads")
        name = name.replace("language_model.", "")  # InternVL
        if name.startswith("mlp") or name.startswith("vision_model"):
            # skip visual tensors
            return []
        if name.endswith(("q_proj.weight", "q_proj.bias")):
            data_torch = LlamaModel.permute(data_torch, n_head, n_head)
        if name.endswith(("k_proj.weight", "k_proj.bias")):
            data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)
        return [(self.map_tensor_name(name), data_torch)]


@ModelBase.register("BertModel", "BertForMaskedLM", "CamembertModel", "BertForSequenceClassification")
class BertModel(TextModel):
    model_arch = gguf.MODEL_ARCH.BERT

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.vocab_size = None

        if cls_out_labels := self.hparams.get("id2label"):
            if len(cls_out_labels) == 2 and cls_out_labels[0] == "LABEL_0":
                # Remove dummy labels added by AutoConfig
                cls_out_labels = None
        self.cls_out_labels = cls_out_labels

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_causal_attention(False)
        self._try_set_pooling_type()

        if self.cls_out_labels:
            self.gguf_writer.add_classifier_output_labels([v for k, v in sorted(self.cls_out_labels.items())])

    def set_vocab(self):
        tokens, toktypes, tokpre = self.get_vocab_base()
        self.vocab_size = len(tokens)

        # we need this to validate the size of the token_type embeddings
        # though currently we are passing all zeros to the token_type embeddings
        # "Sequence A" or "Sequence B"
        self.gguf_writer.add_token_type_count(self.hparams.get("type_vocab_size", 1))
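
        # Illustrative mapping (WordPiece -> "phantom space" vocab):
        #   "hello" -> "\u2581hello"  (word-initial pieces get a space marker)
        #   "##ing" -> "ing"          (continuation pieces drop the ## prefix)
        #   "[CLS]" -> "[CLS]"        (bracketed special tokens pass through)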
        # convert to phantom space vocab
        def phantom(tok):
            if tok.startswith("[") and tok.endswith("]"):
                return tok
            if tok.startswith("##"):
                return tok[2:]
            return "\u2581" + tok
        tokens = list(map(phantom, tokens))

        # add vocab to gguf
        self.gguf_writer.add_tokenizer_model("bert")
        self.gguf_writer.add_tokenizer_pre(tokpre)
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)

        # handle special tokens
        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        if name.startswith("bert."):
            name = name[5:]

        if name.endswith(".gamma"):
            name = name[:-6] + ".weight"

        if name.endswith(".beta"):
            name = name[:-5] + ".bias"

        # we are only using BERT for embeddings so we don't need the pooling layer
        if name in ("embeddings.position_ids", "pooler.dense.weight", "pooler.dense.bias"):
            return []  # we don't need these

        if name.startswith("cls.predictions"):
            return []

        if name.startswith("cls.seq_relationship"):
            return []

        if self.cls_out_labels:
            # For BertForSequenceClassification (direct projection layer)
            if name == "classifier.weight":
                name = "classifier.out_proj.weight"

            if name == "classifier.bias":
                name = "classifier.out_proj.bias"

        return [(self.map_tensor_name(name), data_torch)]

    def _xlmroberta_tokenizer_init(self) -> None:
        # we need the pad_token_id to know how to chop down position_embd matrix
        if (pad_token_id := self.hparams.get("pad_token_id")) is not None:
            self._position_offset = 1 + pad_token_id
            if "max_position_embeddings" in self.hparams:
                self.hparams["max_position_embeddings"] -= self._position_offset
        else:
            self._position_offset = None

    def _xlmroberta_set_vocab(self) -> None:
        # to avoid TypeError: Descriptors cannot be created directly
        # exception when importing sentencepiece_model_pb2
        os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
        from sentencepiece import SentencePieceProcessor
        from sentencepiece import sentencepiece_model_pb2 as model

        tokenizer_path = self.dir_model / 'sentencepiece.bpe.model'

        tokenizer_json = {}
        tokenizer_config_json = {}
        if not tokenizer_path.is_file():
            tokenizer_path = self.dir_model / 'tokenizer.json'
            tokenizer_config_path = self.dir_model / 'tokenizer_config.json'

            if not tokenizer_path.is_file():
                raise FileNotFoundError(f"File not found: {tokenizer_path}")

            from base64 import b64decode
            from transformers import AutoTokenizer
            tokenizer = AutoTokenizer.from_pretrained(self.dir_model)

            with open(tokenizer_path, "r", encoding="utf-8") as fp:
                tokenizer_json = json.load(fp)

            if tokenizer_config_path.is_file():
                with open(tokenizer_config_path, "r", encoding="utf-8") as fp:
                    tokenizer_config_json = json.load(fp)

            add_prefix = tokenizer.add_prefix_space
            remove_whitespaces = tokenizer.clean_up_tokenization_spaces
            precompiled_charsmap = b64decode(tokenizer_json["normalizer"]["precompiled_charsmap"])

            vocab_size = max(self.hparams.get("vocab_size", 0), tokenizer.vocab_size)
        else:
            sentencepiece_model = model.ModelProto()  # pyright: ignore[reportAttributeAccessIssue]
            sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read())
            assert sentencepiece_model.trainer_spec.model_type == 1  # UNIGRAM

            add_prefix = sentencepiece_model.normalizer_spec.add_dummy_prefix
            remove_whitespaces = sentencepiece_model.normalizer_spec.remove_extra_whitespaces
            precompiled_charsmap = sentencepiece_model.normalizer_spec.precompiled_charsmap

            tokenizer = SentencePieceProcessor()
            tokenizer.LoadFromFile(str(tokenizer_path))

            vocab_size = max(self.hparams.get("vocab_size", 0), tokenizer.vocab_size())

        tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
        scores: list[float] = [-10000.0] * vocab_size
        toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size
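
        # Two fill paths below: a real sentencepiece model exposes
        # IdToPiece/GetScore/IsControl directly, while the tokenizer.json
        # fallback reconstructs the same piece/score/type triples from the
        # HF fast tokenizer and its JSON vocab.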
        if isinstance(tokenizer, SentencePieceProcessor):
            for token_id in range(tokenizer.vocab_size()):
                piece = tokenizer.IdToPiece(token_id)
                text = piece.encode("utf-8")
                score = tokenizer.GetScore(token_id)

                toktype = SentencePieceTokenTypes.NORMAL
                if tokenizer.IsUnknown(token_id):
                    toktype = SentencePieceTokenTypes.UNKNOWN
                elif tokenizer.IsControl(token_id):
                    toktype = SentencePieceTokenTypes.CONTROL
                elif tokenizer.IsUnused(token_id):
                    toktype = SentencePieceTokenTypes.UNUSED
                elif tokenizer.IsByte(token_id):
                    toktype = SentencePieceTokenTypes.BYTE

                tokens[token_id] = text
                scores[token_id] = score
                toktypes[token_id] = toktype
        else:
            added_vocab = tokenizer.get_added_vocab()
            unk_token = tokenizer_config_json.get("unk_token")
            unk_token_id = added_vocab.get(unk_token, tokenizer_json["model"].get("unk_id", 3))

            for token_id in range(tokenizer.vocab_size):
                if (piece := tokenizer._convert_id_to_token(token_id)) is not None:
                    text = piece.encode("utf-8")
                    score = tokenizer_json["model"]["vocab"][token_id][1]

                    toktype = SentencePieceTokenTypes.NORMAL
                    if token_id == unk_token_id:
                        toktype = SentencePieceTokenTypes.UNKNOWN
                    elif token_id in tokenizer.all_special_ids:
                        toktype = SentencePieceTokenTypes.CONTROL
                    elif token_id in added_vocab.values():
                        toktype = SentencePieceTokenTypes.USER_DEFINED
                    # No reliable way to detect this, but jina doesn't have any
                    # elif tokenizer.IsByte(token_id):
                    #     toktype = SentencePieceTokenTypes.BYTE

                    tokens[token_id] = text
                    scores[token_id] = score
                    toktypes[token_id] = toktype

        if isinstance(tokenizer, SentencePieceProcessor):
            # realign tokens (see HF tokenizer code)
            tokens = [b'<s>', b'<pad>', b'</s>', b'<unk>'] + tokens[3:-1]
            scores = [0.0, 0.0, 0.0, 0.0] + scores[3:-1]
            toktypes = [
                SentencePieceTokenTypes.CONTROL,
                SentencePieceTokenTypes.CONTROL,
                SentencePieceTokenTypes.CONTROL,
                SentencePieceTokenTypes.UNKNOWN,
            ] + toktypes[3:-1]

        if self.model_arch == gguf.MODEL_ARCH.NOMIC_BERT_MOE:
            # Add mask token missing from sentencepiece.bpe.model
            tokens[250001] = b'<mask>'
            scores[250001] = 0.0
            toktypes[250001] = SentencePieceTokenTypes.CONTROL

        self.gguf_writer.add_tokenizer_model("t5")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)
        self.gguf_writer.add_add_space_prefix(add_prefix)
        self.gguf_writer.add_token_type_count(self.hparams.get("type_vocab_size", 1))
        self.gguf_writer.add_remove_extra_whitespaces(remove_whitespaces)
        if precompiled_charsmap:
            self.gguf_writer.add_precompiled_charsmap(precompiled_charsmap)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)


@ModelBase.register("DistilBertModel", "DistilBertForMaskedLM", "DistilBertForSequenceClassification")
class DistilBertModel(BertModel):
    model_arch = gguf.MODEL_ARCH.BERT

    def set_gguf_parameters(self):
        self.gguf_writer.add_layer_norm_eps(1e-12)
        logger.info("gguf: layer norm epsilon = 1e-12")
        super().set_gguf_parameters()

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        if name.startswith("distilbert."):
            name = name[11:]

        # These layers act as MLM head, so we don't need them
        if name.startswith("vocab_"):
            return []

        return super().modify_tensors(data_torch, name, bid)


@ModelBase.register("RobertaModel", "RobertaForSequenceClassification")
class RobertaModel(BertModel):
    model_arch = gguf.MODEL_ARCH.BERT

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # we need the pad_token_id to know how to chop down position_embd matrix
        if (pad_token_id := self.hparams.get("pad_token_id")) is not None:
            self._position_offset = 1 + pad_token_id
            if "max_position_embeddings" in self.hparams:
                self.hparams["max_position_embeddings"] -= self._position_offset
        else:
            self._position_offset = None

    def set_vocab(self):
        """Support BPE tokenizers for roberta models"""
        bpe_tok_path = self.dir_model / "tokenizer.json"
        if bpe_tok_path.exists():
            self._set_vocab_gpt2()

            # we need this to validate the size of the token_type embeddings
            # though currently we are passing all zeros to the token_type embeddings
            # "Sequence A" or "Sequence B"
            self.gguf_writer.add_token_type_count(self.hparams.get("type_vocab_size", 1))
        else:
            return super().set_vocab()

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # if name starts with "roberta.", remove the prefix
        # e.g. https://huggingface.co/BAAI/bge-reranker-v2-m3/tree/main
        if name.startswith("roberta."):
            name = name[8:]

        # position embeddings start at pad_token_id + 1, so just chop down the weight tensor
        if name == "embeddings.position_embeddings.weight":
            if self._position_offset is not None:
                data_torch = data_torch[self._position_offset:,:]

        return super().modify_tensors(data_torch, name, bid)


@ModelBase.register("NomicBertModel")
class NomicBertModel(BertModel):
    model_arch = gguf.MODEL_ARCH.BERT

    def __init__(self, dir_model: Path, ftype: gguf.LlamaFileType, fname_out: Path, **kwargs: Any):
        hparams = kwargs.pop("hparams", None)
        if hparams is None:
            hparams = ModelBase.load_hparams(dir_model, False)

        self.is_moe = bool(hparams.get("moe_every_n_layers"))
        self.model_arch = gguf.MODEL_ARCH.NOMIC_BERT_MOE if self.is_moe else gguf.MODEL_ARCH.NOMIC_BERT

        super().__init__(dir_model, ftype, fname_out, hparams=hparams, **kwargs)

        self._tokenizer_is_xlmroberta = self._is_tokenizer_xlmroberta()
        if self._tokenizer_is_xlmroberta:
            self._xlmroberta_tokenizer_init()
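
        # The n_positions override below distinguishes the known checkpoints
        # by their (n_positions, max_trained_positions) pair:
        #   (8192, 2048) -> nomic-embed-text v1/v1.5, trained for 2048 tokens
        #   (2048, 2048) -> nomic-embed-text-v2-moe, trained for 512 tokens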
        npos, mtp = self.hparams["n_positions"], self.hparams.get("max_trained_positions", 2048)
        if npos == 8192 and mtp == 2048:
            self.hparams["n_positions"] = 2048  # nomic-embed-text v1 and v1.5 are trained for 2048 tokens.
        elif npos == 2048 and mtp == 2048:
            self.hparams["n_positions"] = 512  # nomic-embed-text-v2-moe is trained for 512 tokens.
        else:
            raise ValueError(f"unrecognized parameters: n_positions={npos}, max_trained_positions={mtp}")

        assert self.hparams["activation_function"] == ("gelu" if self.is_moe else "swiglu")

        # this doesn't do anything in the HF version
        assert self.hparams["causal"] is False

        # no bias tensors unless MoE
        assert self.hparams["qkv_proj_bias"] == self.is_moe
        assert self.hparams["mlp_fc1_bias"] == self.is_moe
        assert self.hparams["mlp_fc2_bias"] == self.is_moe

        # norm at end of layer
        assert self.hparams["prenorm"] is False

        # standard RoPE
        assert self.hparams["rotary_emb_fraction"] == 1.0
        assert self.hparams["rotary_emb_interleaved"] is False
        assert self.hparams["rotary_emb_scale_base"] is None

    def set_vocab(self) -> None:
        if self._tokenizer_is_xlmroberta:
            return self._xlmroberta_set_vocab()
        return super().set_vocab()

    def modify_tensors(self, data_torch: torch.Tensor, name: str, bid: int | None) -> Iterable[tuple[str, torch.Tensor]]:
        # If the tensor is an experts bias tensor, skip it by returning an empty list.
        if "mlp.experts.bias" in name:
            return []  # Explicitly return an empty list.

        if "mlp.experts.mlp.w1" in name:
            data_torch = data_torch.view(self.hparams["num_experts"], self.hparams["n_inner"], self.hparams["n_embd"])
            name += ".weight"

        if "mlp.experts.mlp.w2" in name:
            data_torch = data_torch.view(self.hparams["num_experts"], self.hparams["n_inner"], self.hparams["n_embd"])
            data_torch = data_torch.transpose(1, 2)
            name += ".weight"

        return [(self.map_tensor_name(name), data_torch)]

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_rope_freq_base(self.hparams["rotary_emb_base"])
        if self.is_moe:
            self.gguf_writer.add_moe_every_n_layers(self.hparams["moe_every_n_layers"])
            self.gguf_writer.add_expert_count(self.hparams["num_experts"])
            self.gguf_writer.add_expert_used_count(self.hparams["moe_top_k"])

    def _is_tokenizer_xlmroberta(self) -> bool:
        with open(self.dir_model / "tokenizer.json") as f:
            tokenizer_json = json.load(f)
        toktyp = tokenizer_json["model"]["type"]
        if toktyp == "Unigram":
            return True
        if toktyp == "WordPiece":
            return False
        raise ValueError(f"unknown tokenizer: {toktyp}")


@ModelBase.register("NeoBERT", "NeoBERTLMHead", "NeoBERTForSequenceClassification")
class NeoBert(BertModel):
    model_arch = gguf.MODEL_ARCH.NEO_BERT

    def set_gguf_parameters(self):
        super().set_gguf_parameters()

        # NeoBERT uses 2/3 of the intermediate size as feed forward length
        self.gguf_writer.add_feed_forward_length(int(2 * self.hparams["intermediate_size"] / 3))
        self.gguf_writer.add_rope_freq_base(10000.0)  # default value for NeoBERT
        self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)

        f_rms_eps = self.hparams.get("norm_eps", 1e-6)  # default value for NeoBERT
        self.gguf_writer.add_layer_norm_rms_eps(f_rms_eps)
        logger.info(f"gguf: rms norm epsilon = {f_rms_eps}")

        self.gguf_writer.add_pooling_type(gguf.PoolingType.CLS)  # https://huggingface.co/chandar-lab/NeoBERT#how-to-use

    def modify_tensors(self, data_torch, name, bid):
        if name.startswith("decoder."):
            return []

        if name.startswith("model."):
            name = name[6:]

        return super().modify_tensors(data_torch, name, bid)


@ModelBase.register("XLMRobertaModel", "XLMRobertaForSequenceClassification")
class XLMRobertaModel(BertModel):
    model_arch = gguf.MODEL_ARCH.BERT
    _lora_files = {}
    _lora_names = []

    def __init__(self, dir_model: Path, ftype: gguf.LlamaFileType, fname_out: Path, **kwargs: Any):
        hparams = kwargs.pop("hparams", None)
        if hparams is None:
            hparams = ModelBase.load_hparams(dir_model, False)

        if lora_names := hparams.get("lora_adaptations"):
            self._lora_names = lora_names
            self.model_arch = gguf.MODEL_ARCH.JINA_BERT_V3

        super().__init__(dir_model, ftype, fname_out, hparams=hparams, **kwargs)
        self._xlmroberta_tokenizer_init()

    def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
        if self._lora_names:
            for name in self._lora_names:
                fname = self.add_prefix_to_filename(self.fname_out, f"lora-{name}-")
                self._lora_files[name] = gguf.GGUFWriter(fname, arch=gguf.MODEL_ARCH_NAMES[self.model_arch], endianess=self.endianess, use_temp_file=self.use_temp_file, dry_run=self.dry_run)

        return super().generate_extra_tensors()

    def set_type(self):
        for lora_writer in self._lora_files.values():
            lora_writer.add_type(gguf.GGUFType.ADAPTER)
            lora_writer.add_string(gguf.Keys.Adapter.TYPE, "lora")
        super().set_type()

    def set_vocab(self):
        self._xlmroberta_set_vocab()

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # if name starts with "roberta.", remove the prefix
        # e.g. https://huggingface.co/BAAI/bge-reranker-v2-m3/tree/main
        if name.startswith("roberta."):
            name = name[8:]

        # jina-embeddings-v3
        if ".parametrizations." in name:
            name = name.replace(".parametrizations.", ".")
            if name.endswith(".original"):
                name = name[:-9]

        # position embeddings start at pad_token_id + 1, so just chop down the weight tensor
        if name == "embeddings.position_embeddings.weight":
            if self._position_offset is not None:
                data_torch = data_torch[self._position_offset:,:]
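
        # jina-embeddings-v3 ships one stacked LoRA tensor per weight, with
        # dim 0 indexing the task; the branch below splits it and routes each
        # slice to its per-task adapter GGUF writer.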
        if name.endswith(".0.lora_A") or name.endswith(".0.lora_B"):
            if name.startswith("pooler.dense"):
                return []

            num_loras = data_torch.size(0)
            assert num_loras == len(self._lora_names)

            # Split out each LoRA in their own GGUF
            for i, lora_writer in enumerate(self._lora_files.values()):
                new_name = self.map_tensor_name(name[:-9]) + name[-7:].lower()
                data = data_torch[i, :, :]
                # Transpose/flip token_embd/types into correct shape
                if new_name == "token_embd.weight.lora_b":
                    data = data.T
                elif new_name.startswith("token_types.weight."):
                    new_name = new_name[:-1] + ("a" if new_name[-1:] == "b" else "b")
                lora_writer.add_tensor(new_name, data.float().numpy(), raw_dtype=gguf.GGMLQuantizationType.F32)

            return []

        return super().modify_tensors(data_torch, name, bid)

    def set_gguf_parameters(self):
        super().set_gguf_parameters()

        # jina-embeddings-v3
        if rotary_emb_base := self.hparams.get("rotary_emb_base"):
            self.gguf_writer.add_rope_freq_base(rotary_emb_base)
        lora_alpha = self.hparams.get("lora_alpha")
        if lora_prompt_prefixes := self.hparams.get("task_instructions"):
            assert self._lora_files and all(lora_name in lora_prompt_prefixes for lora_name in self._lora_files.keys())
        for lora_name, lora_writer in self._lora_files.items():
            lora_writer.add_float32(gguf.Keys.Adapter.LORA_ALPHA, lora_alpha if lora_alpha is not None else 1.0)
            lora_writer.add_string(gguf.Keys.Adapter.LORA_TASK_NAME, lora_name)
            if lora_prompt_prefixes:
                lora_writer.add_string(gguf.Keys.Adapter.LORA_PROMPT_PREFIX, lora_prompt_prefixes[lora_name])

    def write(self):
        super().write()
        for lora_writer in self._lora_files.values():
            lora_writer.write_header_to_file()
            lora_writer.write_kv_data_to_file()
            lora_writer.write_tensors_to_file(progress=True)
            lora_writer.close()


@ModelBase.register("GemmaForCausalLM")
class GemmaModel(TextModel):
    model_arch = gguf.MODEL_ARCH.GEMMA

    def set_vocab(self):
        self._set_vocab_sentencepiece()

        # TODO: these special tokens should be exported only for the CodeGemma family
        special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=False,
                                          special_token_types=['prefix', 'suffix', 'middle', 'fsep', 'eot'])
        special_vocab._set_special_token("prefix", 67)
        special_vocab._set_special_token("suffix", 69)
        special_vocab._set_special_token("middle", 68)
        special_vocab._set_special_token("fsep", 70)
        special_vocab._set_special_token("eot", 107)
        special_vocab.chat_template = None  # do not add it twice
        special_vocab.add_to_gguf(self.gguf_writer)

        self.gguf_writer.add_add_space_prefix(False)

    def set_gguf_parameters(self):
        hparams = self.hparams
        block_count = hparams["num_hidden_layers"]

        self.gguf_writer.add_context_length(hparams["max_position_embeddings"])
        self.gguf_writer.add_embedding_length(hparams["hidden_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
        self.gguf_writer.add_head_count(hparams["num_attention_heads"])
        self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"] if "num_key_value_heads" in hparams else hparams["num_attention_heads"])
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
        self.gguf_writer.add_key_length(hparams["head_dim"])
        self.gguf_writer.add_value_length(hparams["head_dim"])
        self.gguf_writer.add_file_type(self.ftype)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        # lm_head is not used in llama.cpp, while autoawq will include this tensor in model
        # To prevent errors, skip loading lm_head.weight.
        if name == "lm_head.weight":
            logger.debug(f"Skipping get tensor {name!r} in safetensors so that convert can end normally.")
            return []

        # ref: https://github.com/huggingface/transformers/blob/fc37f38915372c15992b540dfcbbe00a916d4fc6/src/transformers/models/gemma/modeling_gemma.py#L89
        if name.endswith("norm.weight"):
            data_torch = data_torch + 1

        return [(self.map_tensor_name(name), data_torch)]


@ModelBase.register("Gemma2ForCausalLM")
class Gemma2Model(TextModel):
    model_arch = gguf.MODEL_ARCH.GEMMA2

    def set_vocab(self):
        self._set_vocab_sentencepiece()
        self.gguf_writer.add_add_space_prefix(False)

    def set_gguf_parameters(self):
        hparams = self.hparams
        block_count = hparams["num_hidden_layers"]

        self.gguf_writer.add_context_length(hparams["max_position_embeddings"])
        self.gguf_writer.add_embedding_length(hparams["hidden_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
        self.gguf_writer.add_head_count(hparams["num_attention_heads"])
        self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"] if "num_key_value_heads" in hparams else hparams["num_attention_heads"])
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
        self.gguf_writer.add_key_length(hparams["head_dim"])
        self.gguf_writer.add_value_length(hparams["head_dim"])
        self.gguf_writer.add_file_type(self.ftype)
        self.gguf_writer.add_attn_logit_softcapping(
            self.hparams["attn_logit_softcapping"]
        )
        self.gguf_writer.add_final_logit_softcapping(
            self.hparams["final_logit_softcapping"]
        )
        self.gguf_writer.add_sliding_window(self.hparams["sliding_window"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        # lm_head is not used in llama.cpp, while autoawq will include this tensor in model
        # To prevent errors, skip loading lm_head.weight.
        if name == "lm_head.weight":
            logger.debug(f"Skipping get tensor {name!r} in safetensors so that convert can end normally.")
            return []

        # ref: https://github.com/huggingface/transformers/blob/fc37f38915372c15992b540dfcbbe00a916d4fc6/src/transformers/models/gemma/modeling_gemma.py#L89
        if name.endswith("norm.weight"):
            data_torch = data_torch + 1

        return [(self.map_tensor_name(name), data_torch)]


@ModelBase.register("Gemma3ForCausalLM", "Gemma3ForConditionalGeneration")
class Gemma3Model(TextModel):
    model_arch = gguf.MODEL_ARCH.GEMMA3
    norm_shift = 1.0  # Gemma3RMSNorm adds 1.0 to the norm value

    def set_vocab(self):
        self._set_vocab_sentencepiece()
        self.gguf_writer.add_add_space_prefix(False)

    def set_gguf_parameters(self):
        hparams = self.hparams
        block_count = hparams["num_hidden_layers"]

        # some default values are not specified in the hparams
        self.gguf_writer.add_context_length(hparams.get("max_position_embeddings", 131072))
        self.gguf_writer.add_embedding_length(hparams["hidden_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
        self.gguf_writer.add_head_count(hparams.get("num_attention_heads", 8))
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams.get("rms_norm_eps", 1e-6))
        self.gguf_writer.add_key_length(hparams.get("head_dim", 256))
        self.gguf_writer.add_value_length(hparams.get("head_dim", 256))
        self.gguf_writer.add_file_type(self.ftype)
        self.gguf_writer.add_rope_freq_base(hparams.get("rope_theta", 1_000_000.0))  # for global layers
        # attn_logit_softcapping is removed in Gemma3
        assert hparams.get("attn_logit_softcapping") is None
        self.gguf_writer.add_sliding_window(hparams["sliding_window"])
        self.gguf_writer.add_head_count_kv(hparams.get("num_key_value_heads", 4))
        if hparams.get("rope_scaling") is not None:
            assert hparams["rope_scaling"]["rope_type"] == "linear"
            # important: this rope_scaling is only applied for global layers, and is not used by the 1B model
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
            self.gguf_writer.add_rope_scaling_factor(hparams["rope_scaling"]["factor"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        if "language_model." in name:
            name = name.replace("language_model.", "")
        elif name.startswith("multi_modal_projector.") or name.startswith("vision_tower.") \
                or name.startswith("multimodal_projector.") or name.startswith("vision_model."):
            return []  # skip vision tensors

        # remove OOV (out-of-vocabulary) rows in token_embd
        if "embed_tokens.weight" in name:
            vocab = self._create_vocab_sentencepiece()
            tokens = vocab[0]
            data_torch = data_torch[:len(tokens)]

        # ref code in Gemma3RMSNorm
        # output = output * (1.0 + self.weight.float())
        # note: this is not the case on gemma3n
        if name.endswith("norm.weight"):
            data_torch = data_torch + self.norm_shift

        return [(self.map_tensor_name(name), data_torch)]


@ModelBase.register("Gemma3TextModel")
class EmbeddingGemma(Gemma3Model):
    model_arch = gguf.MODEL_ARCH.GEMMA_EMBEDDING

    def set_gguf_parameters(self):
        super().set_gguf_parameters()

        # Override the sliding window size as it gets adjusted by the Gemma3TextConfig
        # constructor. We want to use the value from the original model's config.json.
        # ref: https://github.com/huggingface/transformers/pull/40700
        with open(self.dir_model / "config.json", "r", encoding="utf-8") as f:
            config = json.load(f)
        orig_sliding_window = config.get("sliding_window")
        if orig_sliding_window is None:
            raise ValueError("sliding_window not found in model config - this is required for the model")

        logger.info(f"Using original sliding_window from config: {orig_sliding_window} "
                    f"instead of {self.hparams['sliding_window']}")
        self.gguf_writer.add_sliding_window(orig_sliding_window)

        self._try_set_pooling_type()


@ModelBase.register("Gemma3ForConditionalGeneration")
class Gemma3VisionModel(MmprojModel):
    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        hparams = self.hparams
        self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.GEMMA3)
        # default values below are taken from HF transformers code
        self.gguf_writer.add_vision_attention_layernorm_eps(hparams.get("layer_norm_eps", 1e-6))
        self.gguf_writer.add_vision_use_gelu(True)
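
        # Worked example for the proj_scale_factor calculation below
        # (illustrative values, not from the original source):
        # image_seq_length = 256 gives n_per_side = 16; with image_size = 896
        # and patch_size = 14 there are 64 patches per side, so
        # proj_scale_factor = 64 // 16 = 4, the default that needs no entry.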
        # calculate proj_scale_factor (used by tinygemma3 test model)
        image_seq_length = self.preprocessor_config.get("image_seq_length", 256)
        n_per_side = int(image_seq_length ** 0.5)
        image_size = self.hparams["image_size"]
        patch_size = self.hparams["patch_size"]
        proj_scale_factor = (image_size // patch_size) // n_per_side
        if proj_scale_factor > 0 and proj_scale_factor != 4:
            # we only need to write this if it's not the default value
            # in this case, we are converting a test model
            self.gguf_writer.add_vision_projector_scale_factor(proj_scale_factor)

    def tensor_force_quant(self, name, new_name, bid, n_dims):
        # related to https://github.com/ggml-org/llama.cpp/issues/13025
        if "input_projection" in name:
            return gguf.GGMLQuantizationType.F16
        if ".embeddings." in name:
            return gguf.GGMLQuantizationType.F32
        return super().tensor_force_quant(name, new_name, bid, n_dims)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        if "vision_model.head." in name:
            return []  # skip redundant tensors for tinygemma3

        if name.startswith("multi_modal_projector.") or name.startswith("vision_tower.") \
                or name.startswith("multimodal_projector.") or name.startswith("vision_model."):
            # process vision tensors
            name = name.replace("_weight", ".weight")

            # correct norm value; only this "soft_emb_norm" needs to be corrected as it's part of the Gemma projector
            # the other norm values are part of the SigLIP model, and they are already correct
            # ref code: Gemma3RMSNorm
            if "soft_emb_norm.weight" in name:
                logger.info(f"Correcting norm value for '{name}'")
                data_torch = data_torch + 1

            return [(self.map_tensor_name(name), data_torch)]

        return []  # skip other tensors


@ModelBase.register("Gemma3nForConditionalGeneration")
class Gemma3NModel(Gemma3Model):
    model_arch = gguf.MODEL_ARCH.GEMMA3N
    norm_shift = 0.0  # same value as Gemma3p5RMSNorm scale_shift in the python code

    _altup_proj: list[Tensor] = []
    _altup_unembd: list[Tensor] = []

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        assert self.hparams["altup_num_inputs"] == 4, "Current conversion only supports 4 altup inputs"
        self._altup_proj = [
            torch.Tensor(),  # to be replaced
            torch.Tensor(),  # to be replaced
            torch.Tensor(),  # to be replaced
        ]
        self._altup_unembd = [
            torch.Tensor(),  # to be replaced
            torch.Tensor(),  # to be replaced
            torch.Tensor(),  # to be replaced
        ]

    def set_vocab(self):
        super().set_vocab()

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_altup_active_idx(self.hparams["altup_active_idx"])
        self.gguf_writer.add_altup_num_inputs(self.hparams["altup_num_inputs"])
        self.gguf_writer.add_embedding_length_per_layer_input(self.hparams["hidden_size_per_layer_input"])
        self.gguf_writer.add_shared_kv_layers(self.hparams["num_kv_shared_layers"])

        activation_sparsity_scale = []
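        # Each pattern entry is a target sparsity fraction; the inverse
        # normal CDF converts it to a cutoff expressed in standard
        # deviations. Illustrative example (not from the original source):
        # a target of 0.95 maps to icdf(0.95) ~= 1.6449, which is the
        # multiplier stored in the GGUF for that layer.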
        for s in self.hparams["activation_sparsity_pattern"]:
            normal_dist = torch.distributions.normal.Normal(0, 1)
            std_multiplier = normal_dist.icdf(torch.tensor(s, dtype=torch.float32))
            activation_sparsity_scale.append(std_multiplier.item())
        self.gguf_writer.add_activation_sparsity_scale(activation_sparsity_scale)

        sliding_window_pattern = []
        for t in self.hparams["layer_types"]:
            sliding_window_pattern.append(t == "sliding_attention")
        self.gguf_writer.add_sliding_window_pattern(sliding_window_pattern)

    def _stack_matrices(self, matrices: list[Tensor]) -> Tensor | None:
        has_all = all(m.numel() > 0 for m in matrices)
        if not has_all:
            return None
        else:
            return torch.stack(matrices, dim=0)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        if name.endswith("_scale"):
            name = name + ".weight"

        # TODO: implement self.prediction_coefs.weight.clamp_(...)

        if "language_model." not in name:
            return []  # skip non-language model tensors

        if "altup_unembed_projections" in name:
            data_torch = data_torch.to(device="cpu")
            if ".0." in name:
                self._altup_unembd[0] = data_torch
            elif ".1." in name:
                self._altup_unembd[1] = data_torch
            elif ".2." in name:
                self._altup_unembd[2] = data_torch
            else:
                raise ValueError(f"Unknown name: {name}")
            out = self._stack_matrices(self._altup_unembd)
            if out is not None:
                return [(self.map_tensor_name("model.altup_unembed_projections.weight"), out)]
            else:
                return []

        if "altup_projections" in name:
            data_torch = data_torch.to(device="cpu")
            if ".0." in name:
                self._altup_proj[0] = data_torch
            elif ".1." in name:
                self._altup_proj[1] = data_torch
            elif ".2." in name:
                self._altup_proj[2] = data_torch
            else:
                raise ValueError(f"Unknown name: {name}")
            out = self._stack_matrices(self._altup_proj)
            if out is not None:
                return [(self.map_tensor_name("model.altup_projections.weight"), out)]
            else:
                return []

        return super().modify_tensors(data_torch, name, bid)


@ModelBase.register("Starcoder2ForCausalLM")
class StarCoder2Model(TextModel):
    model_arch = gguf.MODEL_ARCH.STARCODER2


@ModelBase.register("Rwkv6ForCausalLM")
class Rwkv6Model(TextModel):
    model_arch = gguf.MODEL_ARCH.RWKV6

    def set_vocab(self):
        self._set_vocab_rwkv_world()

    def set_gguf_parameters(self):
        block_count = self.hparams["num_hidden_layers"]
        head_size = self.hparams["head_size"]
        hidden_size = self.hparams["hidden_size"]
        layer_norm_eps = self.hparams["layer_norm_epsilon"]
        rescale_every_n_layers = self.hparams["rescale_every"]
        intermediate_size = self.hparams["intermediate_size"] if self.hparams["intermediate_size"] is not None else int((hidden_size * 3.5) // 32 * 32)
        time_mix_extra_dim = 64 if hidden_size == 4096 else 32
        time_decay_extra_dim = 128 if hidden_size == 4096 else 64

        # RWKV isn't context limited
        self.gguf_writer.add_context_length(1048576)
        self.gguf_writer.add_embedding_length(hidden_size)
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_layer_norm_eps(layer_norm_eps)
        self.gguf_writer.add_rescale_every_n_layers(rescale_every_n_layers)
        self.gguf_writer.add_wkv_head_size(head_size)
        self.gguf_writer.add_time_mix_extra_dim(time_mix_extra_dim)
        self.gguf_writer.add_time_decay_extra_dim(time_decay_extra_dim)
        self.gguf_writer.add_feed_forward_length(intermediate_size)
        self.gguf_writer.add_file_type(self.ftype)

        # required by llama.cpp, unused
        self.gguf_writer.add_head_count(0)

    lerp_weights: dict[int, dict[str, Tensor]] = {}

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        new_name = self.map_tensor_name(name)

        if not (new_name.endswith(".weight") or new_name.endswith(".bias")):
            new_name += ".weight"

        if new_name.endswith("time_mix_w1.weight") or new_name.endswith("time_mix_decay_w1.weight") or new_name.endswith("time_mix_decay_w2.weight"):
            data_torch = data_torch.transpose(0, 1)

        if new_name.endswith("time_mix_w2.weight"):
            data_torch = data_torch.permute(0, 2, 1)

        if new_name.endswith("time_mix_decay.weight") or "lerp" in new_name:
            data_torch = data_torch.squeeze()

        try:
            rescale_every_n_layers = self.hparams["rescale_every"]
            if rescale_every_n_layers > 0:
                if new_name.endswith("time_mix_output.weight") or new_name.endswith("channel_mix_value.weight"):
                    data_torch = data_torch.div_(2 ** int(bid // rescale_every_n_layers))
        except KeyError:
            pass

        # concat time_mix_lerp weights to reduce some cpu overhead
        # also reduces the number of tensors in the model
        if bid is not None and "time_mix_lerp" in new_name and "time_mix_lerp_x" not in new_name:
            try:
                self.lerp_weights[bid][new_name] = data_torch
            except KeyError:
                self.lerp_weights[bid] = {new_name: data_torch}
            if all(f"blk.{bid}.time_mix_lerp_{i}.weight" in self.lerp_weights[bid].keys() for i in ["w", "k", "v", "r", "g"]):
                new_name = f"blk.{bid}.time_mix_lerp_fused.weight"
                data = torch.stack([self.lerp_weights[bid][f"blk.{bid}.time_mix_lerp_{i}.weight"].unsqueeze(0) for i in ["w", "k", "v", "r", "g"]], dim=0).unsqueeze(1)
                yield (new_name, data)
            return

        yield (new_name, data_torch)


@ModelBase.register("RWKV6Qwen2ForCausalLM")
class RWKV6Qwen2Model(Rwkv6Model):
    model_arch = gguf.MODEL_ARCH.RWKV6QWEN2

    def set_vocab(self):
        try:
            self._set_vocab_sentencepiece()
        except FileNotFoundError:
            self._set_vocab_gpt2()

    def set_gguf_parameters(self):
        block_count = self.hparams["num_hidden_layers"]
        num_attention_heads = self.hparams["num_attention_heads"]
        num_key_value_heads = self.hparams["num_key_value_heads"]
        hidden_size = self.hparams["hidden_size"]
        head_size = hidden_size // num_attention_heads
        rms_norm_eps = self.hparams["rms_norm_eps"]
        intermediate_size = self.hparams["intermediate_size"]
        time_mix_extra_dim = self.hparams.get("lora_rank_tokenshift", 64 if hidden_size >= 4096 else 32)
        time_decay_extra_dim = self.hparams.get("lora_rank_decay", 128 if hidden_size >= 4096 else 64)

        # RWKV isn't context limited
        self.gguf_writer.add_context_length(1048576)
        self.gguf_writer.add_embedding_length(hidden_size)
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_wkv_head_size(head_size)
        self.gguf_writer.add_time_mix_extra_dim(time_mix_extra_dim)
        self.gguf_writer.add_time_decay_extra_dim(time_decay_extra_dim)
        self.gguf_writer.add_feed_forward_length(intermediate_size)
        self.gguf_writer.add_file_type(self.ftype)

        # special parameters for time_mixing in RWKV6QWEN2
        self.gguf_writer.add_layer_norm_rms_eps(rms_norm_eps)
        self.gguf_writer.add_token_shift_count(1)
        # RWKV6QWEN2 uses grouped key/value like GQA
        self.gguf_writer.add_head_count_kv(num_key_value_heads)

        # required by llama.cpp, unused
        self.gguf_writer.add_head_count(0)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        for new_name, data in super().modify_tensors(data_torch, name, bid):
            if "time_mix_w1" in new_name or "time_mix_w2" in new_name:
                data = data.view(5, -1, data.shape[-1])
                # rwkv6qwen2 uses a different order (rkvwg) instead of the original wkvrg
                # permute them here to avoid code changes
                data = torch.stack([data[3], data[1], data[2], data[0], data[4]], dim=0).view(-1, data.shape[-1])
                if "w2" in new_name:
                    data = data.view(5, -1, data.shape[-1])
                yield (new_name, data)
                continue
            yield (new_name, data)


@ModelBase.register("Rwkv7ForCausalLM", "RWKV7ForCausalLM")
class Rwkv7Model(TextModel):
    model_arch = gguf.MODEL_ARCH.RWKV7

    def set_vocab(self):
        self._set_vocab_rwkv_world()

    def calc_lora_rank(self, hidden_size, exponent, multiplier):
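        # Rounds hidden_size ** exponent * multiplier to the nearest
        # multiple of 32. Illustrative example (not from the original
        # source): hidden_size = 2048, exponent = 0.5, multiplier = 1.8
        # gives round(45.25 * 1.8 / 32) * 32 = round(2.55) * 32 = 96.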
        return max(1, round(hidden_size ** exponent * multiplier / 32)) * 32

    def set_gguf_parameters(self):
        block_count = self.hparams["num_hidden_layers"]
        try:
            head_size = self.hparams["head_size"]
            layer_norm_eps = self.hparams["layer_norm_epsilon"]
        except KeyError:
            head_size = self.hparams["head_dim"]
            layer_norm_eps = self.hparams["norm_eps"]
        hidden_size = self.hparams["hidden_size"]
        intermediate_size = self.hparams["intermediate_size"] if self.hparams["intermediate_size"] is not None else (hidden_size * 4)

        # ICLR: In-Context-Learning-Rate
        try:
            lora_rank_decay = self.hparams["lora_rank_decay"] if self.hparams["lora_rank_decay"] is not None else self.calc_lora_rank(hidden_size, 0.5, 1.8)
            lora_rank_iclr = self.hparams["lora_rank_iclr"] if self.hparams["lora_rank_iclr"] is not None else self.calc_lora_rank(hidden_size, 0.5, 1.8)
            lora_rank_value_residual_mix = self.hparams["lora_rank_value_residual_mix"] if self.hparams["lora_rank_value_residual_mix"] is not None else self.calc_lora_rank(hidden_size, 0.5, 1.3)
            lora_rank_gate = self.hparams["lora_rank_gate"] if self.hparams["lora_rank_gate"] is not None else self.calc_lora_rank(hidden_size, 0.8, 0.6)
        except KeyError:
            lora_rank_decay = self.hparams["decay_low_rank_dim"] if self.hparams["decay_low_rank_dim"] is not None else self.calc_lora_rank(hidden_size, 0.5, 1.8)
            lora_rank_iclr = self.hparams["a_low_rank_dim"] if self.hparams["a_low_rank_dim"] is not None else self.calc_lora_rank(hidden_size, 0.5, 1.8)
            lora_rank_value_residual_mix = self.hparams["v_low_rank_dim"] if self.hparams["v_low_rank_dim"] is not None else self.calc_lora_rank(hidden_size, 0.5, 1.3)
            lora_rank_gate = self.hparams["gate_low_rank_dim"] if self.hparams["gate_low_rank_dim"] is not None else self.calc_lora_rank(hidden_size, 0.8, 0.6)

        # RWKV isn't context limited
        self.gguf_writer.add_context_length(1048576)
        self.gguf_writer.add_embedding_length(hidden_size)
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_layer_norm_eps(layer_norm_eps)
        self.gguf_writer.add_wkv_head_size(head_size)
        self.gguf_writer.add_decay_lora_rank(lora_rank_decay)
        self.gguf_writer.add_iclr_lora_rank(lora_rank_iclr)
        self.gguf_writer.add_value_residual_mix_lora_rank(lora_rank_value_residual_mix)
        self.gguf_writer.add_gate_lora_rank(lora_rank_gate)
        self.gguf_writer.add_feed_forward_length(intermediate_size)
        self.gguf_writer.add_file_type(self.ftype)

        # required by llama.cpp, unused
        self.gguf_writer.add_head_count(0)

    lerp_weights: dict[int, dict[str, Tensor]] = {}
    lora_needs_transpose: bool = True

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # unify tensor names here to make life easier
        name = name.replace("blocks", "layers").replace("ffn", "feed_forward")
        name = name.replace("self_attn", "attention").replace("attn", "attention")
        name = name.replace("time_mixer.", "")
        # lora layer names in fla-hub's impl
        if "_lora.lora" in name:
            self.lora_needs_transpose = False
            name = name.replace("_lora.lora.0.weight", "1.weight")
            name = name.replace("_lora.lora.2.weight", "2.weight")
            name = name.replace("_lora.lora.2.bias", "0.weight")

        name = name.replace("feed_forward_norm", "ln2")
        name = name.replace("g_norm", "ln_x")

        if "attention.v" in name and "value" not in self.map_tensor_name(name) and bid == 0:
            # some models have dummy v0/v1/v2 on first layer while others don't
            # ignore them all since they are not used
            return

        wkv_has_gate = self.hparams.get("wkv_has_gate", True)
        lerp_list = ["r", "w", "k", "v", "a", "g"] if wkv_has_gate else ["r", "w", "k", "v", "a"]
  4570. if bid is not None and "attention.x_" in name:
  4571. if "attention.x_x" in name:
  4572. # already concatenated
  4573. new_name = f"blk.{bid}.time_mix_lerp_fused.weight"
  4574. data = data_torch.reshape(len(lerp_list), 1, 1, -1)
  4575. yield (new_name, data)
  4576. else:
  4577. try:
  4578. self.lerp_weights[bid][name] = data_torch
  4579. except KeyError:
  4580. self.lerp_weights[bid] = {name: data_torch}
  4581. if all(f"model.layers.{bid}.attention.x_{i}" in self.lerp_weights[bid].keys() for i in lerp_list):
  4582. new_name = f"blk.{bid}.time_mix_lerp_fused.weight"
  4583. data = torch.stack([self.lerp_weights[bid][f"model.layers.{bid}.attention.x_{i}"] for i in lerp_list], dim=0)
  4584. yield (new_name, data)
  4585. return
  4586. else:
  4587. data_torch = data_torch.squeeze()
  4588. new_name = self.map_tensor_name(name)
  4589. if not (new_name.endswith(".weight") or new_name.endswith(".bias")):
  4590. new_name += ".weight"
  4591. if self.lora_needs_transpose and any(
  4592. new_name.endswith(t) for t in [
  4593. "time_mix_w1.weight", "time_mix_w2.weight",
  4594. "time_mix_a1.weight", "time_mix_a2.weight",
  4595. "time_mix_v1.weight", "time_mix_v2.weight",
  4596. "time_mix_g1.weight", "time_mix_g2.weight",
  4597. ]
  4598. ):
  4599. data_torch = data_torch.transpose(0, 1)
  4600. if 'r_k' in new_name:
  4601. data_torch = data_torch.flatten()
  4602. if bid == 0 and "time_mix_a" in new_name:
  4603. # dummy v0/v1/v2 on first layer
  4604. # easist way to make llama happy
  4605. yield (new_name.replace("time_mix_a", "time_mix_v"), data_torch)
  4606. yield (new_name, data_torch)
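# Editor's note: a minimal sketch (toy shapes, hypothetical names, never called by
# the converter) of how the per-component x_r/x_w/... lerp vectors collected above
# are fused into the single time_mix_lerp_fused tensor once all of them were seen.
def _demo_rwkv7_fuse_lerp():
    import torch
    hidden = 8
    lerp_list = ["r", "w", "k", "v", "a", "g"]
    weights = {c: torch.randn(1, 1, hidden) for c in lerp_list}  # toy per-component vectors
    fused = torch.stack([weights[c] for c in lerp_list], dim=0)  # (6, 1, 1, hidden)
    return fused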

@ModelBase.register("RwkvHybridForCausalLM")
class ARwkv7Model(Rwkv7Model):
    model_arch = gguf.MODEL_ARCH.ARWKV7

    def set_vocab(self):
        try:
            self._set_vocab_sentencepiece()
        except FileNotFoundError:
            self._set_vocab_gpt2()

    def set_gguf_parameters(self):
        block_count = self.hparams["num_hidden_layers"]
        hidden_size = self.hparams["hidden_size"]
        head_size = self.hparams["head_size"]
        rms_norm_eps = self.hparams["rms_norm_eps"]
        intermediate_size = self.hparams["intermediate_size"]
        wkv_has_gate = self.hparams["wkv_has_gate"]
        assert self.hparams["wkv_version"] == 7

        # ICLR: In-Context Learning Rate
        lora_rank_decay = 64
        lora_rank_iclr = 64
        lora_rank_value_residual_mix = 32
        lora_rank_gate = 128 if wkv_has_gate else 0

        # RWKV isn't context limited
        self.gguf_writer.add_context_length(1048576)
        self.gguf_writer.add_embedding_length(hidden_size)
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_layer_norm_rms_eps(rms_norm_eps)
        self.gguf_writer.add_wkv_head_size(head_size)
        self.gguf_writer.add_decay_lora_rank(lora_rank_decay)
        self.gguf_writer.add_iclr_lora_rank(lora_rank_iclr)
        self.gguf_writer.add_value_residual_mix_lora_rank(lora_rank_value_residual_mix)
        self.gguf_writer.add_gate_lora_rank(lora_rank_gate)
        self.gguf_writer.add_feed_forward_length(intermediate_size)
        self.gguf_writer.add_file_type(self.ftype)
        self.gguf_writer.add_token_shift_count(1)

        # required by llama.cpp, unused
        self.gguf_writer.add_head_count(0)

@ModelBase.register("MambaForCausalLM", "MambaLMHeadModel", "FalconMambaForCausalLM")
class MambaModel(TextModel):
    model_arch = gguf.MODEL_ARCH.MAMBA

    def __init__(self, dir_model: Path, *args, **kwargs):
        # Avoid using AutoConfig for hparams
        hparams = kwargs.pop("hparams", None)
        if hparams is None:
            with open(dir_model / "config.json", "r", encoding="utf-8") as f:
                hparams = json.load(f)
        super().__init__(dir_model, *args, hparams=hparams, **kwargs)

    def set_vocab(self):
        vocab_size = self.hparams["vocab_size"]
        # Round vocab size to next multiple of 8
        pad_vocab = self.hparams.get("pad_vocab_size_multiple", 8)
        # pad using ceiling division
        # ref: https://stackoverflow.com/a/17511341/22827863
        vocab_size = -(vocab_size // -pad_vocab) * pad_vocab
        self.hparams["vocab_size"] = vocab_size

        if (self.dir_model / "tokenizer.json").is_file():
            self._set_vocab_gpt2()
        elif (self.dir_model / "tokenizer.model").is_file():
            self._set_vocab_sentencepiece()
        else:
            # Use the GPT-NeoX tokenizer when no tokenizer files are present
            self._set_vocab_builtin("gpt-neox", vocab_size)

    def set_gguf_parameters(self):
        d_model = self.find_hparam(["hidden_size", "d_model"])
        d_conv = self.find_hparam(["conv_kernel", "d_conv"], optional=True) or 4
        d_inner = self.find_hparam(["intermediate_size", "d_inner"], optional=True) or 2 * d_model
        d_state = self.find_hparam(["state_size", "d_state"], optional=True) or 16
        # ceiling division
        # ref: https://stackoverflow.com/a/17511341/22827863
        # ref: https://github.com/state-spaces/mamba/blob/ce59daea3a090d011d6476c6e5b97f6d58ddad8b/mamba_ssm/modules/mamba_simple.py#L58
        dt_rank = self.find_hparam(["time_step_rank", "dt_rank"], optional=True) or -(d_model // -16)
        rms_norm_eps = self.find_hparam(["layer_norm_epsilon", "rms_norm_eps"], optional=True) or 1e-5
        use_dt_b_c_norm = False
        # For falconmamba we do apply RMS norm on B / DT and C layers
        if self.find_hparam(["model_type"], optional=True) in ("falcon_mamba",):
            use_dt_b_c_norm = True
        # Fail early for models which don't have a block expansion factor of 2
        assert d_inner == 2 * d_model

        self.gguf_writer.add_context_length(2**20)  # arbitrary value; for those who use the default
        self.gguf_writer.add_embedding_length(d_model)
        self.gguf_writer.add_feed_forward_length(0)  # unused, but seemingly required when loading
        self.gguf_writer.add_head_count(0)  # unused, but seemingly required when loading
        self.gguf_writer.add_block_count(self.block_count)
        self.gguf_writer.add_ssm_conv_kernel(d_conv)
        self.gguf_writer.add_ssm_inner_size(d_inner)
        self.gguf_writer.add_ssm_state_size(d_state)
        self.gguf_writer.add_ssm_time_step_rank(dt_rank)
        self.gguf_writer.add_layer_norm_rms_eps(rms_norm_eps)
        self.gguf_writer.add_ssm_dt_b_c_rms(use_dt_b_c_norm)  # For classic Mamba we don't apply rms norm on B / DT layers
        self.gguf_writer.add_file_type(self.ftype)

    _tok_embd = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        output_name = self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT)
        tok_embd_name = self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD)

        new_name = self.map_tensor_name(name)

        if name.endswith(".A_log"):
            logger.debug("A_log --> A ==> " + new_name)
            data_torch = -torch.exp(data_torch)

        # [4 1 8192 1] -> [4 8192 1 1]
        if self.match_model_tensor_name(new_name, gguf.MODEL_TENSOR.SSM_CONV1D, bid):
            data_torch = data_torch.squeeze()

        # assuming token_embd.weight is seen before output.weight
        if self._tok_embd is not None and new_name == output_name:
            if torch.equal(self._tok_embd, data_torch):
                logger.debug(f"{output_name} is equivalent to {tok_embd_name}, omitting")
                return []
        elif new_name == tok_embd_name:
            self._tok_embd = data_torch

        return [(new_name, data_torch)]
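# Editor's note: a minimal sketch (toy numbers, never called by the converter) of
# the ceiling-division vocab padding idiom -(a // -b) * b used in set_vocab above.
def _demo_pad_vocab_size():
    vocab_size, pad_to = 50277, 8  # hypothetical unpadded vocab size
    padded = -(vocab_size // -pad_to) * pad_to  # ceil(50277 / 8) * 8
    assert padded == 50280
    return padded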

@ModelBase.register("Mamba2ForCausalLM")
class Mamba2Model(TextModel):
    model_arch = gguf.MODEL_ARCH.MAMBA2

    def __init__(self, dir_model: Path, *args, **kwargs):
        # Avoid using AutoConfig for hparams
        # It wrongly assumes all Mamba2 models are Mamba-Codestral-7B-v0.1
        hparams = kwargs.pop("hparams", None)
        if hparams is None:
            with open(dir_model / "config.json", "r", encoding="utf-8") as f:
                hparams = json.load(f)
        super().__init__(dir_model, *args, hparams=hparams, **kwargs)
        self.d_model = self.find_hparam(["hidden_size", "d_model", "dim"])
        self.d_inner = self.find_hparam(["mamba_d_ssm", "intermediate_size", "d_inner"], optional=True) or 2 * self.d_model
        self.n_group = self.find_hparam(["n_groups"], optional=True) or 1

    def set_vocab(self):
        vocab_size = self.hparams["vocab_size"]
        # Round vocab size to next multiple of 16
        pad_vocab = self.hparams.get("pad_vocab_size_multiple", 16)
        # pad using ceiling division
        # ref: https://stackoverflow.com/a/17511341/22827863
        vocab_size = -(vocab_size // -pad_vocab) * pad_vocab
        self.hparams["vocab_size"] = vocab_size

        if (self.dir_model / "tokenizer.model").is_file():
            self._set_vocab_sentencepiece()
        elif (self.dir_model / "tokenizer.model.v3").is_file():
            # mamba-codestral
            raise NotImplementedError(f"Please rename {self.dir_model / 'tokenizer.model.v3'} to {self.dir_model / 'tokenizer.model'}")
        elif (self.dir_model / "tokenizer.json").is_file():
            self._set_vocab_gpt2()
        else:
            # Use the GPT-NeoX tokenizer when no tokenizer files are present
            self._set_vocab_builtin("gpt-neox", vocab_size)

    def set_gguf_parameters(self):
        d_conv = self.find_hparam(["conv_kernel", "d_conv"], optional=True) or 4
        d_state = self.find_hparam(["state_size", "d_state"], optional=True) or 128
        head_dim = self.find_hparam(["mamba_d_head", "head_dim"], optional=True) or 64
        rms_norm_eps = self.find_hparam(["layer_norm_epsilon", "rms_norm_eps"], optional=True) or 1e-5

        # Fail early for models which don't have a block expansion factor of 2
        # TODO: does this really matter?
        # skip the assertion for FalconH1 Model
        if self.model_arch != gguf.MODEL_ARCH.FALCON_H1:
            assert self.d_inner == 2 * self.d_model
            assert self.d_inner % head_dim == 0

        self.gguf_writer.add_context_length(2**20)  # arbitrary value; for those who use the default
        self.gguf_writer.add_embedding_length(self.d_model)
        self.gguf_writer.add_feed_forward_length(0)  # unused, but seemingly required when loading
        self.gguf_writer.add_head_count(0)  # unused, but seemingly required when loading
        self.gguf_writer.add_block_count(self.block_count)
        self.gguf_writer.add_ssm_conv_kernel(d_conv)
        self.gguf_writer.add_ssm_inner_size(self.d_inner)
        self.gguf_writer.add_ssm_state_size(d_state)
        self.gguf_writer.add_ssm_time_step_rank(self.d_inner // head_dim)
        self.gguf_writer.add_ssm_group_count(self.n_group)
        self.gguf_writer.add_layer_norm_rms_eps(rms_norm_eps)
        self.gguf_writer.add_file_type(self.ftype)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        if name.startswith("model.backbone") or name.startswith("model.lm_head"):
            # map Mamba-Codestral-7B-v0.1 tensor names to the names used by Mamba-2
            name = name.removeprefix("model.")

        if name.endswith(".dt_bias"):
            name = name.rpartition(".dt_bias")[0] + ".dt_proj.bias"

        new_name = self.map_tensor_name(name)

        if self.match_model_tensor_name(new_name, gguf.MODEL_TENSOR.SSM_CONV1D, bid):
            data_torch = data_torch.squeeze()
        elif any(self.match_model_tensor_name(new_name, t, bid, suffix="") for t in [
            gguf.MODEL_TENSOR.SSM_A,
            gguf.MODEL_TENSOR.SSM_D,
        ]):
            # unsqueeze A to use similar shape semantics as Mamba-1
            # (D is also unsqueezed, but for more straightforward broadcast internally)
            data_torch = data_torch.reshape((*data_torch.shape, 1))
        elif self.match_model_tensor_name(new_name, gguf.MODEL_TENSOR.SSM_NORM, bid):
            data_torch = data_torch.reshape((self.n_group, self.d_inner // self.n_group))

        if name.endswith(".A_log"):
            logger.debug("A_log --> A ==> " + new_name)
            data_torch = -torch.exp(data_torch)

        yield (new_name, data_torch)
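# Editor's note: a minimal sketch (toy values, never called by the converter) of the
# A_log -> A reparameterization applied above: checkpoints store log(-A), so the
# negated exponential recovers the negative-valued decay matrix A.
def _demo_a_log_to_a():
    import torch
    a_log = torch.zeros(4, 1)  # hypothetical stored A_log values
    a = -torch.exp(a_log)      # every entry becomes -1.0 here
    assert torch.all(a < 0)
    return a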

@ModelBase.register("JambaForCausalLM")
class JambaModel(TextModel):
    model_arch = gguf.MODEL_ARCH.JAMBA

    def get_vocab_base_pre(self, tokenizer) -> str:
        del tokenizer  # unused
        return "gpt-2"

    def set_vocab(self):
        if (self.dir_model / "tokenizer.model").is_file():
            # Using Jamba's tokenizer.json causes errors on model load
            # (something about "byte not found in vocab"),
            # but there's a working tokenizer.model
            self._set_vocab_sentencepiece()
        else:
            # Some Jamba models only have a tokenizer.json, which works.
            self._set_vocab_gpt2()

    def set_gguf_parameters(self):
        d_model = self.find_hparam(["hidden_size", "mamba_d_model"])
        d_conv = self.find_hparam(["mamba_d_conv"], optional=True) or 4
        d_inner = self.hparams["mamba_expand"] * d_model
        d_state = self.find_hparam(["mamba_d_state"], optional=True) or 16
        # ceiling division
        # ref: https://stackoverflow.com/a/17511341/22827863
        # ref: https://github.com/state-spaces/mamba/blob/ce59daea3a090d011d6476c6e5b97f6d58ddad8b/mamba_ssm/modules/mamba_simple.py#L58
        dt_rank = self.find_hparam(["mamba_dt_rank"], optional=True) or -(d_model // -16)
        rms_norm_eps = self.find_hparam(["layer_norm_epsilon", "rms_norm_eps"], optional=True) or 1e-6
        n_kv_head = self.hparams["num_key_value_heads"]
        attn_offset = self.hparams["attn_layer_offset"]
        attn_period = self.hparams["attn_layer_period"]
        n_kv_vec = [0 for _ in range(attn_offset)] + [
            n_kv_head if (i - attn_offset) % attn_period == 0 else 0 for i in range(attn_offset, self.block_count)
        ]

        self.gguf_writer.add_block_count(self.block_count)
        self.gguf_writer.add_context_length(self.find_hparam(["max_position_embeddings", "n_ctx"]))
        self.gguf_writer.add_embedding_length(d_model)
        self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
        self.gguf_writer.add_head_count(self.hparams["num_attention_heads"])
        self.gguf_writer.add_head_count_kv(n_kv_vec)
        self.gguf_writer.add_ssm_conv_kernel(d_conv)
        self.gguf_writer.add_ssm_inner_size(d_inner)
        self.gguf_writer.add_ssm_state_size(d_state)
        self.gguf_writer.add_ssm_time_step_rank(dt_rank)
        self.gguf_writer.add_layer_norm_rms_eps(rms_norm_eps)
        self.gguf_writer.add_expert_count(self.hparams["num_experts"])
        self.gguf_writer.add_expert_used_count(self.hparams["num_experts_per_tok"])
        self.gguf_writer.add_file_type(self.ftype)

    _experts: list[dict[str, Tensor]] | None = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # Mini-Jamba
        name = name.replace(".moe.", ".feed_forward.")
        if bid is not None:
            moe_offset = self.hparams["expert_layer_offset"]
            moe_period = self.hparams["expert_layer_period"]

            if not (bid >= moe_offset and (bid - moe_offset) % moe_period == 0):
                name = name.replace(".experts.0.", ".")

        # process the experts separately
        if ".feed_forward.experts." in name:
            n_experts = self.hparams["num_experts"]

            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                # merge the experts into a single 3d tensor
                for wid in ["down_proj", "gate_proj", "up_proj"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.feed_forward.experts.{xid}.{wid}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)

                    # using the same merged name as qwen2moe
                    merged_name = f"model.layers.{bid}.mlp.experts.{wid}.weight"

                    new_name = self.map_tensor_name(merged_name)

                    yield new_name, data_torch
            return

        new_name = self.map_tensor_name(name)

        if self.match_model_tensor_name(new_name, gguf.MODEL_TENSOR.SSM_CONV1D, bid):
            data_torch = data_torch.squeeze()

        if name.endswith(".A_log"):
            logger.debug("A_log --> A ==> " + new_name)
            data_torch = -torch.exp(data_torch)

        yield (new_name, data_torch)

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")
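# Editor's note: a minimal sketch (hypothetical hyperparameters, never called) of
# the per-layer KV-head vector built in JambaModel.set_gguf_parameters above;
# attention layers get n_kv_head, Mamba layers get 0.
def _demo_jamba_kv_head_vector():
    n_kv_head, attn_offset, attn_period, n_layer = 8, 4, 8, 32  # toy config
    n_kv_vec = [0 for _ in range(attn_offset)] + [
        n_kv_head if (i - attn_offset) % attn_period == 0 else 0
        for i in range(attn_offset, n_layer)
    ]
    assert n_kv_vec[4] == 8 and n_kv_vec[5] == 0  # layer 4 is attention, layer 5 is Mamba
    return n_kv_vec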

@ModelBase.register("CohereForCausalLM")
class CommandR2Model(TextModel):
    model_arch = gguf.MODEL_ARCH.COMMAND_R

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # max_position_embeddings = 8192 in config.json but the model was actually
        # trained on a 128k context length;
        # aya-23 models don't have model_max_length specified
        self.hparams["max_position_embeddings"] = self.find_hparam(["model_max_length", "max_position_embeddings"])

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_logit_scale(self.hparams["logit_scale"])
        self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)

@ModelBase.register("Cohere2ForCausalLM")
class Cohere2Model(TextModel):
    model_arch = gguf.MODEL_ARCH.COHERE2

    def set_gguf_parameters(self):
        super().set_gguf_parameters()

        self.gguf_writer.add_logit_scale(self.hparams["logit_scale"])
        self.gguf_writer.add_sliding_window(self.hparams["sliding_window"])
        self.gguf_writer.add_vocab_size(self.hparams["vocab_size"])

        rotary_pct = self.hparams["rotary_pct"]
        hidden_size = self.hparams["hidden_size"]
        num_attention_heads = self.hparams["num_attention_heads"]
        self.gguf_writer.add_rope_dimension_count(int(rotary_pct * (hidden_size // num_attention_heads)))
        self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)

@ModelBase.register("OlmoForCausalLM")
@ModelBase.register("OLMoForCausalLM")
class OlmoModel(TextModel):
    model_arch = gguf.MODEL_ARCH.OLMO

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_layer_norm_eps(1e-5)
        clip_qkv = self.hparams.get("clip_qkv")
        if clip_qkv is not None:
            self.gguf_writer.add_clamp_kqv(clip_qkv)

    # Same as super class, but permuting q_proj, k_proj
    # Copied from: LlamaModel
    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        n_head = self.hparams["num_attention_heads"]
        n_kv_head = self.hparams.get("num_key_value_heads")

        if name.endswith("q_proj.weight"):
            data_torch = LlamaModel.permute(data_torch, n_head, n_head)
        if name.endswith("k_proj.weight"):
            data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)

        return [(self.map_tensor_name(name), data_torch)]

@ModelBase.register("SeedOssForCausalLM")
class SeedOssModel(TextModel):
    model_arch = gguf.MODEL_ARCH.SEED_OSS

@ModelBase.register("Olmo2ForCausalLM")
@ModelBase.register("Olmo3ForCausalLM")
class Olmo2Model(TextModel):
    model_arch = gguf.MODEL_ARCH.OLMO2

    def set_gguf_parameters(self):
        super().set_gguf_parameters()

        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
            self.gguf_writer.add_rope_scaling_attn_factors(rope_scaling["attention_factor"])
            self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"])

        if "sliding_window" in self.hparams:
            self.gguf_writer.add_sliding_window(self.hparams["sliding_window"])

            sliding_window_pattern = []
            if "layer_types" in self.hparams:
                sliding_window_pattern = [t == "sliding_attention" for t in self.hparams["layer_types"]]
            else:
                # Olmo2 does not use sliding window attention.
                # Olmo3 defaults to using sliding window for all layers except every 4th.
                for i in range(self.hparams["num_hidden_layers"]):
                    sliding_window_pattern.append((i + 1) % 4 != 0)

            self.gguf_writer.add_sliding_window_pattern(sliding_window_pattern)
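# Editor's note: a minimal sketch (toy layer count, never called) of the Olmo3
# fallback pattern built above: sliding-window attention everywhere except every
# 4th layer, which uses full attention.
def _demo_olmo3_sliding_window_pattern():
    n_layer = 8  # hypothetical layer count
    pattern = [(i + 1) % 4 != 0 for i in range(n_layer)]
    assert pattern == [True, True, True, False, True, True, True, False]
    return pattern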

@ModelBase.register("OlmoeForCausalLM")
class OlmoeModel(TextModel):
    model_arch = gguf.MODEL_ARCH.OLMOE

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_layer_norm_rms_eps(1e-5)
        if (n_experts := self.hparams.get("num_experts")) is not None:
            self.gguf_writer.add_expert_count(n_experts)

    _experts: list[dict[str, Tensor]] | None = None

    # Copied from: Qwen2MoeModel
    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # process the experts separately
        if name.find("experts") != -1:
            n_experts = self.hparams["num_experts"]
            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for w_name in ["down_proj", "gate_proj", "up_proj"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)

                    merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"

                    new_name = self.map_tensor_name(merged_name)

                    tensors.append((new_name, data_torch))
                return tensors
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]

    # Copied from: Qwen2MoeModel
    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")
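# Editor's note: a minimal sketch (toy shapes, never called) of the expert-merging
# step used by the MoE converters above: n_experts separate (d_ff, d_model) weight
# matrices are stacked into one 3d tensor per projection.
def _demo_merge_expert_weights():
    import torch
    n_experts, d_ff, d_model = 4, 6, 8  # hypothetical sizes
    per_expert = [torch.randn(d_ff, d_model) for _ in range(n_experts)]
    merged = torch.stack(per_expert, dim=0)  # (n_experts, d_ff, d_model)
    assert merged.shape == (n_experts, d_ff, d_model)
    return merged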

@ModelBase.register("JinaBertModel", "JinaBertForMaskedLM")
class JinaBertV2Model(BertModel):
    model_arch = gguf.MODEL_ARCH.JINA_BERT_V2

    def set_vocab(self):
        tokenizer_class = 'BertTokenizer'
        with open(self.dir_model / "tokenizer_config.json", "r", encoding="utf-8") as f:
            tokenizer_class = json.load(f)['tokenizer_class']

        if tokenizer_class == 'BertTokenizer':
            super().set_vocab()
        elif tokenizer_class == 'RobertaTokenizer':
            self._set_vocab_gpt2()
            self.gguf_writer.add_token_type_count(2)
        else:
            raise NotImplementedError(f'Tokenizer {tokenizer_class} is not supported for JinaBertModel')

@ModelBase.register("OpenELMForCausalLM")
class OpenELMModel(TextModel):
    model_arch = gguf.MODEL_ARCH.OPENELM

    @staticmethod
    def _make_divisible(v: float | int, divisor: int) -> int:
        # ref: https://huggingface.co/apple/OpenELM-270M-Instruct/blob/eb111ff2e6724348e5b905984063d4064d4bc579/configuration_openelm.py#L34-L38
        new_v = max(divisor, int(v + divisor / 2) // divisor * divisor)
        # Make sure that round down does not go down by more than 10%.
        if new_v < 0.9 * v:
            new_v += divisor
        return new_v

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        ffn_multipliers: list[float] = self.hparams["ffn_multipliers"]
        ffn_dim_divisor: int = self.hparams["ffn_dim_divisor"]
        self._n_embd: int = self.hparams["model_dim"]
        self._num_kv_heads: list[int] = self.hparams["num_kv_heads"]
        self._num_query_heads: list[int] = self.hparams["num_query_heads"]
        self._ffn_dims: list[int] = [
            OpenELMModel._make_divisible(multiplier * self._n_embd, ffn_dim_divisor)
            for multiplier in ffn_multipliers
        ]
        assert isinstance(self._num_kv_heads, list) and isinstance(self._num_kv_heads[0], int)
        assert isinstance(self._num_query_heads, list) and isinstance(self._num_query_heads[0], int)

    # Uses the tokenizer from meta-llama/Llama-2-7b-hf
    def set_vocab(self):
        try:
            self._set_vocab_sentencepiece()
        except FileNotFoundError:
            self._set_vocab_builtin("llama-spm", self.hparams["vocab_size"])

    def set_gguf_parameters(self):
        n_embd = self._n_embd
        head_dim = self.hparams["head_dim"]
        rot_pct = 1.0
        assert self.block_count == len(self._num_kv_heads)
        assert self.block_count == len(self._num_query_heads)
        assert self.block_count == len(self._ffn_dims)

        self.gguf_writer.add_block_count(self.block_count)
        self.gguf_writer.add_context_length(self.hparams["max_context_length"])
        self.gguf_writer.add_embedding_length(n_embd)
        self.gguf_writer.add_feed_forward_length(self._ffn_dims)
        self.gguf_writer.add_head_count(self._num_query_heads)
        self.gguf_writer.add_head_count_kv(self._num_kv_heads)
        self.gguf_writer.add_rope_freq_base(self.hparams["rope_freq_constant"])
        # https://huggingface.co/apple/OpenELM-270M-Instruct/blob/c401df2/modeling_openelm.py#L30
        self.gguf_writer.add_layer_norm_rms_eps(1e-6)
        self.gguf_writer.add_rope_dimension_count(int(rot_pct * head_dim))
        self.gguf_writer.add_key_length(head_dim)
        self.gguf_writer.add_value_length(head_dim)
        self.gguf_writer.add_file_type(self.ftype)

    def find_hparam(self, keys: Iterable[str], optional: bool = False) -> Any:
        if "n_layers" in keys:
            return self.hparams["num_transformer_layers"]

        return super().find_hparam(keys, optional)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # split ff
        if bid is not None and name == f"transformer.layers.{bid}.ffn.proj_1.weight":
            ff_dim = self._ffn_dims[bid]
            yield (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE, bid), data_torch[:ff_dim])
            yield (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP, bid), data_torch[ff_dim:])
            return

        yield (self.map_tensor_name(name), data_torch)
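# Editor's note: a minimal sketch (toy shapes, never called) of the fused proj_1
# split above: OpenELM stores the gate and up projections concatenated along dim 0,
# so the first ff_dim rows are the gate and the remaining rows are up.
def _demo_split_openelm_proj_1():
    import torch
    ff_dim, d_model = 4, 8  # hypothetical sizes
    proj_1 = torch.randn(2 * ff_dim, d_model)
    gate, up = proj_1[:ff_dim], proj_1[ff_dim:]
    assert gate.shape == up.shape == (ff_dim, d_model)
    return gate, up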

@ModelBase.register("ArcticForCausalLM")
class ArcticModel(TextModel):
    model_arch = gguf.MODEL_ARCH.ARCTIC

    def set_vocab(self):
        # The reason for using a custom implementation here is that the
        # snowflake-arctic-instruct model redefined tokens 31998 and 31999 from
        # tokenizer.model and used them as BOS and EOS instead of adding new tokens.
        from sentencepiece import SentencePieceProcessor

        tokenizer_path = self.dir_model / 'tokenizer.model'

        if not tokenizer_path.is_file():
            logger.error(f'Error: Missing {tokenizer_path}')
            sys.exit(1)

        # Read the whole vocabulary from the tokenizer.model file
        tokenizer = SentencePieceProcessor()
        tokenizer.LoadFromFile(str(tokenizer_path))

        vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())

        tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
        scores: list[float] = [-10000.0] * vocab_size
        toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size

        for token_id in range(tokenizer.vocab_size()):
            piece = tokenizer.IdToPiece(token_id)
            text = piece.encode("utf-8")
            score = tokenizer.GetScore(token_id)

            toktype = SentencePieceTokenTypes.NORMAL
            if tokenizer.IsUnknown(token_id):
                toktype = SentencePieceTokenTypes.UNKNOWN
            elif tokenizer.IsControl(token_id):
                toktype = SentencePieceTokenTypes.CONTROL
            elif tokenizer.IsUnused(token_id):
                toktype = SentencePieceTokenTypes.UNUSED
            elif tokenizer.IsByte(token_id):
                toktype = SentencePieceTokenTypes.BYTE

            tokens[token_id] = text
            scores[token_id] = score
            toktypes[token_id] = toktype

        # Use the added_tokens_decoder field from tokenizer_config.json as the source
        # of information about added/redefined tokens and modify them accordingly.
        tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
        if tokenizer_config_file.is_file():
            with open(tokenizer_config_file, "r", encoding="utf-8") as f:
                tokenizer_config_json = json.load(f)

                if "added_tokens_decoder" in tokenizer_config_json:
                    added_tokens_decoder = tokenizer_config_json["added_tokens_decoder"]
                    for token_id, token_json in added_tokens_decoder.items():
                        token_id = int(token_id)
                        if token_id >= vocab_size:
                            logger.debug(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
                            continue

                        token_content = token_json["content"]
                        token_type = SentencePieceTokenTypes.USER_DEFINED
                        token_score = -10000.0

                        # Map unk_token to UNKNOWN, other special tokens to CONTROL
                        # Set the score to 0.0 as in the original tokenizer.model
                        if ("special" in token_json) and token_json["special"]:
                            if token_content == tokenizer_config_json["unk_token"]:
                                token_type = SentencePieceTokenTypes.UNKNOWN
                            else:
                                token_type = SentencePieceTokenTypes.CONTROL
                            token_score = 0.0

                        logger.info(f"Setting added token {token_id} to '{token_content}' (type: {token_type}, score: {token_score:.2f})")
                        tokens[token_id] = token_content.encode("utf-8")
                        toktypes[token_id] = token_type
                        scores[token_id] = token_score

        self.gguf_writer.add_tokenizer_model("llama")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        hparams = self.hparams
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])
        self.gguf_writer.add_rope_dimension_count(hparams["hidden_size"] // hparams["num_attention_heads"])

    _experts: list[dict[str, Tensor]] | None = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        n_head = self.hparams["num_attention_heads"]
        n_kv_head = self.hparams.get("num_key_value_heads")

        if name.endswith("q_proj.weight"):
            data_torch = LlamaModel.permute(data_torch, n_head, n_head)
        if name.endswith("k_proj.weight"):
            data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)

        # process the experts separately
        if name.find("block_sparse_moe.experts") != -1:
            n_experts = self.hparams["num_local_experts"]

            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for wid in ["w1", "w2", "w3"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.{wid}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)

                    merged_name = f"layers.{bid}.feed_forward.experts.{wid}.weight"

                    new_name = self.map_tensor_name(merged_name)

                    tensors.append((new_name, data_torch))
                return tensors
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")

@ModelBase.register("DeepseekForCausalLM")
class DeepseekModel(TextModel):
    model_arch = gguf.MODEL_ARCH.DEEPSEEK

    def set_vocab(self):
        try:
            self._set_vocab_sentencepiece()
        except FileNotFoundError:
            self._set_vocab_gpt2()

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        hparams = self.hparams
        if (rope_dim := hparams.get("head_dim")) is None:
            rope_dim = hparams["hidden_size"] // hparams["num_attention_heads"]

        self.gguf_writer.add_rope_dimension_count(rope_dim)
        self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)
        self.gguf_writer.add_leading_dense_block_count(hparams["first_k_dense_replace"])
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])
        self.gguf_writer.add_expert_feed_forward_length(hparams["moe_intermediate_size"])
        self.gguf_writer.add_expert_weights_scale(1.0)
        self.gguf_writer.add_expert_count(hparams["n_routed_experts"])
        self.gguf_writer.add_expert_shared_count(hparams["n_shared_experts"])

    _experts: list[dict[str, Tensor]] | None = None

    @staticmethod
    def permute(weights: Tensor, n_head: int, n_head_kv: int | None):
        if n_head_kv is not None and n_head != n_head_kv:
            n_head = n_head_kv
        return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
                .swapaxes(1, 2)
                .reshape(weights.shape))

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        n_head = self.hparams["num_attention_heads"]
        n_kv_head = self.hparams.get("num_key_value_heads")

        if name.endswith(("q_proj.weight", "q_proj.bias")):
            data_torch = DeepseekModel.permute(data_torch, n_head, n_head)
        if name.endswith(("k_proj.weight", "k_proj.bias")):
            data_torch = DeepseekModel.permute(data_torch, n_head, n_kv_head)

        # process the experts separately
        if name.find("mlp.experts") != -1:
            n_experts = self.hparams["n_routed_experts"]
            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for w_name in ["down_proj", "gate_proj", "up_proj"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)

                    merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"

                    new_name = self.map_tensor_name(merged_name)

                    tensors.append((new_name, data_torch))
                return tensors
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")
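# Editor's note: a minimal sketch (toy sizes, never called) of the RoPE row
# permutation implemented by DeepseekModel.permute above: each head's rows are
# regrouped so the two rotary halves are reordered, leaving the shape unchanged.
def _demo_rope_permute():
    import torch
    n_head, head_dim, d_model = 2, 4, 8  # hypothetical sizes
    w = torch.arange(n_head * head_dim * d_model, dtype=torch.float32).view(n_head * head_dim, d_model)
    out = (w.reshape(n_head, 2, head_dim // 2, d_model)
           .swapaxes(1, 2)
           .reshape(w.shape))
    assert out.shape == w.shape
    return out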

@ModelBase.register(
    "DeepseekV2ForCausalLM",
    "DeepseekV3ForCausalLM",
    "KimiVLForConditionalGeneration",
)
class DeepseekV2Model(TextModel):
    model_arch = gguf.MODEL_ARCH.DEEPSEEK2

    def set_vocab(self):
        try:
            self._set_vocab_gpt2()
            return
        except Exception:
            pass

        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(self.dir_model, trust_remote_code=True)
        tokpre = self.get_vocab_base_pre(tokenizer)

        if tokpre == "kimi-k2":
            # Build merges list using the approach similar to HunYuanMoE
            merges = []
            vocab = {}
            mergeable_ranks = tokenizer.model._mergeable_ranks
            for token, rank in mergeable_ranks.items():
                vocab[QwenModel.token_bytes_to_string(token)] = rank
                if len(token) == 1:
                    continue
                merged = QwenModel.bpe(mergeable_ranks, token, max_rank=rank)
                if len(merged) == 2:
                    merges.append(' '.join(map(QwenModel.token_bytes_to_string, merged)))

            # Build token list
            vocab_size = self.hparams["vocab_size"]
            special_tokens = tokenizer.special_tokens
            reverse_vocab = {id_: encoded_tok for encoded_tok, id_ in {**vocab, **special_tokens}.items()}
            tokens: list[str] = []
            toktypes: list[int] = []

            for i in range(vocab_size):
                if i not in reverse_vocab:
                    tokens.append(f"[PAD{i}]")
                    toktypes.append(gguf.TokenType.UNUSED)
                else:
                    token = reverse_vocab[i]
                    tokens.append(token)
                    if i in special_tokens.values():
                        toktypes.append(gguf.TokenType.CONTROL)
                    else:
                        toktypes.append(gguf.TokenType.NORMAL)

            self.gguf_writer.add_tokenizer_model("gpt2")
            self.gguf_writer.add_tokenizer_pre(tokpre)
            self.gguf_writer.add_token_list(tokens)
            self.gguf_writer.add_token_types(toktypes)
            self.gguf_writer.add_token_merges(merges)

            special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=False)
            special_vocab.add_to_gguf(self.gguf_writer)
        else:
            raise NotImplementedError(f"Deepseek pre-tokenizer {tokpre!r} is not supported yet!")

    def set_gguf_parameters(self):
        # note: deepseek2 using MLA converts into MQA (ie: GQA with 1 group)
        self.hparams["num_key_value_heads"] = 1

        super().set_gguf_parameters()
        hparams = self.hparams

        self.gguf_writer.add_leading_dense_block_count(hparams["first_k_dense_replace"])
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])
        if "q_lora_rank" in hparams and hparams["q_lora_rank"] is not None:
            self.gguf_writer.add_q_lora_rank(hparams["q_lora_rank"])
        self.gguf_writer.add_kv_lora_rank(hparams["kv_lora_rank"])

        # note: deepseek2 using MLA converts into MQA with larger heads, then decompresses to MHA
        self.gguf_writer.add_key_length(hparams["kv_lora_rank"] + hparams["qk_rope_head_dim"])
        self.gguf_writer.add_value_length(hparams["kv_lora_rank"])
        self.gguf_writer.add_key_length_mla(hparams["qk_nope_head_dim"] + hparams["qk_rope_head_dim"])
        self.gguf_writer.add_value_length_mla(hparams["v_head_dim"])

        self.gguf_writer.add_expert_feed_forward_length(hparams["moe_intermediate_size"])
        self.gguf_writer.add_expert_count(hparams["n_routed_experts"])
        self.gguf_writer.add_expert_shared_count(hparams["n_shared_experts"])
        self.gguf_writer.add_expert_weights_scale(hparams["routed_scaling_factor"])
        self.gguf_writer.add_expert_weights_norm(hparams["norm_topk_prob"])

        if hparams["scoring_func"] == "sigmoid":
            self.gguf_writer.add_expert_gating_func(gguf.ExpertGatingFuncType.SIGMOID)
        elif hparams["scoring_func"] == "softmax":
            self.gguf_writer.add_expert_gating_func(gguf.ExpertGatingFuncType.SOFTMAX)
        else:
            raise ValueError(f"Unsupported scoring_func value: {hparams['scoring_func']}")

        self.gguf_writer.add_rope_dimension_count(hparams["qk_rope_head_dim"])

        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
            self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"])
            self.gguf_writer.add_rope_scaling_yarn_log_mul(0.1 * rope_scaling["mscale_all_dim"])

    _experts: list[dict[str, Tensor]] | None = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # skip vision tensors and remove "language_model." for Kimi-VL
        if "vision_tower" in name or "multi_modal_projector" in name:
            return []

        if name.startswith("language_model."):
            name = name.replace("language_model.", "")

        # rename e_score_correction_bias tensors
        if name.endswith("e_score_correction_bias"):
            name = name.replace("e_score_correction_bias", "e_score_correction.bias")

        # skip Multi-Token Prediction (MTP) layers
        block_count = self.hparams["num_hidden_layers"]
        match = re.match(r"model.layers.(\d+)", name)
        if match and int(match.group(1)) >= block_count:
            return []

        # process the experts separately
        if name.find("mlp.experts") != -1:
            n_experts = self.hparams["n_routed_experts"]
            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for w_name in ["down_proj", "gate_proj", "up_proj"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)

                    merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"

                    new_name = self.map_tensor_name(merged_name)

                    tensors.append((new_name, data_torch))
                return tensors
            else:
                return []

        # note: MLA with the absorption optimization needs these two split and k_b_proj transposed
        if name.endswith("kv_b_proj.weight"):
            name_kb = name.replace("kv_b_proj", "k_b_proj")
            name_vb = name.replace("kv_b_proj", "v_b_proj")

            n_head_kv = self.hparams["num_key_value_heads"]
            v_head_dim = self.hparams["v_head_dim"]
            qk_nope_head_dim = self.hparams["qk_nope_head_dim"]

            assert data_torch.shape[0] == n_head_kv * (v_head_dim + qk_nope_head_dim)

            kv_b = data_torch.view(n_head_kv, v_head_dim + qk_nope_head_dim, data_torch.shape[-1])
            k_b, v_b = torch.split(kv_b, [qk_nope_head_dim, v_head_dim], dim=1)
            k_b = k_b.transpose(1, 2)

            return [
                (self.map_tensor_name(name_kb), k_b),
                (self.map_tensor_name(name_vb), v_b)
            ]

        return [(self.map_tensor_name(name), data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")
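# Editor's note: a minimal sketch (toy dimensions, never called) of the MLA
# kv_b_proj split above: the fused projection is viewed per-head, split into the
# no-RoPE key part and the value part, and k_b is transposed for absorption.
def _demo_split_kv_b_proj():
    import torch
    n_head_kv, qk_nope_head_dim, v_head_dim, kv_lora_rank = 2, 4, 3, 8  # hypothetical
    kv_b_proj = torch.randn(n_head_kv * (qk_nope_head_dim + v_head_dim), kv_lora_rank)
    kv_b = kv_b_proj.view(n_head_kv, qk_nope_head_dim + v_head_dim, kv_lora_rank)
    k_b, v_b = torch.split(kv_b, [qk_nope_head_dim, v_head_dim], dim=1)
    k_b = k_b.transpose(1, 2)  # (n_head_kv, kv_lora_rank, qk_nope_head_dim)
    assert k_b.shape == (n_head_kv, kv_lora_rank, qk_nope_head_dim)
    return k_b, v_b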

@ModelBase.register("Dots1ForCausalLM")
class Dots1Model(Qwen2MoeModel):
    model_arch = gguf.MODEL_ARCH.DOTS1

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.hparams["num_experts"] = self.hparams["n_routed_experts"]

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_leading_dense_block_count(self.hparams["first_k_dense_replace"])
        self.gguf_writer.add_expert_shared_count(self.hparams["n_shared_experts"])
        self.gguf_writer.add_expert_weights_scale(self.hparams["routed_scaling_factor"])
        self.gguf_writer.add_expert_weights_norm(self.hparams["norm_topk_prob"])

        if self.hparams["scoring_func"] == "noaux_tc":
            self.gguf_writer.add_expert_gating_func(gguf.ExpertGatingFuncType.SIGMOID)
        else:
            raise ValueError(f"Unsupported scoring_func value: {self.hparams['scoring_func']}")

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None):
        if name.endswith("e_score_correction_bias"):
            name = name.replace("e_score_correction_bias", "e_score_correction.bias")
        if "shared_experts" in name:
            return [(self.map_tensor_name(name), data_torch)]
        return super().modify_tensors(data_torch, name, bid)

@ModelBase.register("PLMForCausalLM")
class PLMModel(TextModel):
    model_arch = gguf.MODEL_ARCH.PLM

    def set_vocab(self):
        self._set_vocab_gpt2()

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        hparams = self.hparams
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])
        self.gguf_writer.add_kv_lora_rank(hparams["kv_lora_rank"])
        self.gguf_writer.add_key_length(hparams["qk_nope_head_dim"] + hparams["qk_rope_head_dim"])
        self.gguf_writer.add_value_length(hparams["v_head_dim"])
        self.gguf_writer.add_rope_dimension_count(hparams["qk_rope_head_dim"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        return [(self.map_tensor_name(name), data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()

@ModelBase.register("T5WithLMHeadModel")
@ModelBase.register("T5ForConditionalGeneration")
@ModelBase.register("MT5ForConditionalGeneration")
@ModelBase.register("UMT5ForConditionalGeneration")
class T5Model(TextModel):
    model_arch = gguf.MODEL_ARCH.T5

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.shared_token_embeddings_found = False

    def set_vocab(self):
        # to avoid TypeError: Descriptors cannot be created directly
        # exception when importing sentencepiece_model_pb2
        os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
        from sentencepiece import SentencePieceProcessor
        from sentencepiece import sentencepiece_model_pb2 as model

        tokenizer_path = self.dir_model / 'tokenizer.model'

        # many older models use spiece.model as the tokenizer model filename
        if not tokenizer_path.is_file():
            tokenizer_path = self.dir_model / 'spiece.model'

        if not tokenizer_path.is_file():
            raise FileNotFoundError(f"File not found: {tokenizer_path}")

        sentencepiece_model = model.ModelProto()  # pyright: ignore[reportAttributeAccessIssue]
        sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read())

        # some models like the Pile-T5 family use a BPE tokenizer instead of Unigram
        if sentencepiece_model.trainer_spec.model_type == 2:  # BPE
            # ensure the tokenizer model file name is correct
            assert tokenizer_path.name == 'tokenizer.model'
            return self._set_vocab_sentencepiece()
        else:
            assert sentencepiece_model.trainer_spec.model_type == 1  # UNIGRAM

        add_prefix = sentencepiece_model.normalizer_spec.add_dummy_prefix
        remove_whitespaces = sentencepiece_model.normalizer_spec.remove_extra_whitespaces
        precompiled_charsmap = sentencepiece_model.normalizer_spec.precompiled_charsmap

        tokenizer = SentencePieceProcessor()
        tokenizer.LoadFromFile(str(tokenizer_path))

        vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())

        tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
        scores: list[float] = [-10000.0] * vocab_size
        toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size

        for token_id in range(tokenizer.vocab_size()):
            piece = tokenizer.IdToPiece(token_id)
            text = piece.encode("utf-8")
            score = tokenizer.GetScore(token_id)

            toktype = SentencePieceTokenTypes.NORMAL
            if tokenizer.IsUnknown(token_id):
                toktype = SentencePieceTokenTypes.UNKNOWN
            elif tokenizer.IsControl(token_id):
                toktype = SentencePieceTokenTypes.CONTROL
            elif tokenizer.IsUnused(token_id):
                toktype = SentencePieceTokenTypes.UNUSED
            elif tokenizer.IsByte(token_id):
                toktype = SentencePieceTokenTypes.BYTE

            tokens[token_id] = text
            scores[token_id] = score
            toktypes[token_id] = toktype

        added_tokens_file = self.dir_model / 'added_tokens.json'
        if added_tokens_file.is_file():
            with open(added_tokens_file, "r", encoding="utf-8") as f:
                added_tokens_json = json.load(f)
                for key in added_tokens_json:
                    token_id = added_tokens_json[key]
                    if token_id >= vocab_size:
                        logger.warning(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
                        continue

                    tokens[token_id] = key.encode("utf-8")
                    scores[token_id] = -1000.0
                    toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED

        if vocab_size > len(tokens):
            pad_count = vocab_size - len(tokens)
            logger.debug(f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]")
            for i in range(1, pad_count + 1):
                tokens.append(bytes(f"[PAD{i}]", encoding="utf-8"))
                scores.append(-1000.0)
                toktypes.append(SentencePieceTokenTypes.UNUSED)

        self.gguf_writer.add_tokenizer_model("t5")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)
        self.gguf_writer.add_add_space_prefix(add_prefix)
        self.gguf_writer.add_remove_extra_whitespaces(remove_whitespaces)
        if precompiled_charsmap:
            self.gguf_writer.add_precompiled_charsmap(precompiled_charsmap)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        if (n_ctx := self.find_hparam(["n_positions"], optional=True)) is None:
            logger.warning("Couldn't find context length in config.json, assuming default value of 512")
            n_ctx = 512
        self.gguf_writer.add_context_length(n_ctx)
        self.gguf_writer.add_embedding_length(self.hparams["d_model"])
        self.gguf_writer.add_feed_forward_length(self.hparams["d_ff"])
        self.gguf_writer.add_block_count(self.hparams["num_layers"])
        if (dec_n_layer := self.hparams.get("num_decoder_layers")) is not None:
            self.gguf_writer.add_decoder_block_count(dec_n_layer)
        self.gguf_writer.add_head_count(self.hparams["num_heads"])
        self.gguf_writer.add_key_length(self.hparams["d_kv"])
        self.gguf_writer.add_value_length(self.hparams["d_kv"])
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_relative_attn_buckets_count(self.hparams["relative_attention_num_buckets"])
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_decoder_start_token_id(self.hparams["decoder_start_token_id"])
        self.gguf_writer.add_file_type(self.ftype)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        # T5-based models contain shared token embedding tensors saved under any of the
        # names "encoder.embed_tokens.weight", "decoder.embed_tokens.weight" or
        # "shared.weight"; some models even store multiple copies in their safetensors
        # files. We use the first tensor seen under these three names as the token
        # embeddings for both encoder and decoder and ignore the remaining ones.
        if name in ["decoder.embed_tokens.weight", "encoder.embed_tokens.weight", "shared.weight"]:
            if not self.shared_token_embeddings_found:
                name = "shared.weight"
                self.shared_token_embeddings_found = True
            else:
                logger.debug(f"Skipping shared tensor {name!r} in safetensors so that convert can end normally.")
                return []

        return [(self.map_tensor_name(name), data_torch)]
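# Editor's note: a minimal sketch (hypothetical tensor names, never called) of the
# first-wins deduplication used by T5Model.modify_tensors above: the first shared
# embedding tensor is renamed to "shared.weight" and later copies are dropped.
def _demo_dedup_shared_embeddings():
    seen = False
    kept = []
    for name in ("encoder.embed_tokens.weight", "decoder.embed_tokens.weight", "shared.weight"):
        if seen:
            continue  # later copies are skipped
        kept.append("shared.weight")  # the first copy is renamed
        seen = True
    assert kept == ["shared.weight"]
    return kept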
  5558. @ModelBase.register("T5EncoderModel")
  5559. class T5EncoderModel(TextModel):
  5560. model_arch = gguf.MODEL_ARCH.T5ENCODER
  5561. def __init__(self, *args, **kwargs):
  5562. super().__init__(*args, **kwargs)
  5563. self.shared_token_embeddings_found = False

    def set_vocab(self):
        # to avoid a TypeError: "Descriptors cannot be created directly"
        # exception when importing sentencepiece_model_pb2
        os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
        from sentencepiece import SentencePieceProcessor
        from sentencepiece import sentencepiece_model_pb2 as model

        tokenizer_path = self.dir_model / 'tokenizer.model'

        # many older models use the spiece.model tokenizer model filename
        if not tokenizer_path.is_file():
            tokenizer_path = self.dir_model / 'spiece.model'

        if not tokenizer_path.is_file():
            raise FileNotFoundError(f"File not found: {tokenizer_path}")

        sentencepiece_model = model.ModelProto()  # pyright: ignore[reportAttributeAccessIssue]
        with open(tokenizer_path, "rb") as f:
            sentencepiece_model.ParseFromString(f.read())

        # some models like the Pile-T5 family use a BPE tokenizer instead of Unigram
        if sentencepiece_model.trainer_spec.model_type == 2:  # BPE
            # ensure the tokenizer model file name is correct
            assert tokenizer_path.name == 'tokenizer.model'
            return self._set_vocab_sentencepiece()
        else:
            assert sentencepiece_model.trainer_spec.model_type == 1  # UNIGRAM

        add_prefix = sentencepiece_model.normalizer_spec.add_dummy_prefix
        remove_whitespaces = sentencepiece_model.normalizer_spec.remove_extra_whitespaces
        precompiled_charsmap = sentencepiece_model.normalizer_spec.precompiled_charsmap

        tokenizer = SentencePieceProcessor()
        tokenizer.LoadFromFile(str(tokenizer_path))

        vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())

        tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
        scores: list[float] = [-10000.0] * vocab_size
        toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size

        for token_id in range(tokenizer.vocab_size()):
            piece = tokenizer.IdToPiece(token_id)
            text = piece.encode("utf-8")
            score = tokenizer.GetScore(token_id)

            toktype = SentencePieceTokenTypes.NORMAL
            if tokenizer.IsUnknown(token_id):
                toktype = SentencePieceTokenTypes.UNKNOWN
            elif tokenizer.IsControl(token_id):
                toktype = SentencePieceTokenTypes.CONTROL
            elif tokenizer.IsUnused(token_id):
                toktype = SentencePieceTokenTypes.UNUSED
            elif tokenizer.IsByte(token_id):
                toktype = SentencePieceTokenTypes.BYTE

            tokens[token_id] = text
            scores[token_id] = score
            toktypes[token_id] = toktype

        added_tokens_file = self.dir_model / 'added_tokens.json'
        if added_tokens_file.is_file():
            with open(added_tokens_file, "r", encoding="utf-8") as f:
                added_tokens_json = json.load(f)
                for key in added_tokens_json:
                    token_id = added_tokens_json[key]
                    if token_id >= vocab_size:
                        logger.warning(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
                        continue

                    tokens[token_id] = key.encode("utf-8")
                    scores[token_id] = -1000.0
                    toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED

        if vocab_size > len(tokens):
            pad_count = vocab_size - len(tokens)
            logger.debug(f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]")
            for i in range(1, pad_count + 1):
                tokens.append(bytes(f"[PAD{i}]", encoding="utf-8"))
                scores.append(-1000.0)
                toktypes.append(SentencePieceTokenTypes.UNUSED)

        self.gguf_writer.add_tokenizer_model("t5")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)
        self.gguf_writer.add_add_space_prefix(add_prefix)
        self.gguf_writer.add_remove_extra_whitespaces(remove_whitespaces)
        if precompiled_charsmap:
            self.gguf_writer.add_precompiled_charsmap(precompiled_charsmap)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        if (n_ctx := self.find_hparam(["n_positions"], optional=True)) is None:
            logger.warning("Couldn't find context length in config.json, assuming default value of 512")
            n_ctx = 512
        self.gguf_writer.add_context_length(n_ctx)
        self.gguf_writer.add_embedding_length(self.hparams["d_model"])
        self.gguf_writer.add_feed_forward_length(self.hparams["d_ff"])
        self.gguf_writer.add_block_count(self.hparams["num_layers"])
        self.gguf_writer.add_head_count(self.hparams["num_heads"])
        self.gguf_writer.add_key_length(self.hparams["d_kv"])
        self.gguf_writer.add_value_length(self.hparams["d_kv"])
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_relative_attn_buckets_count(self.hparams["relative_attention_num_buckets"])
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        # T5-based models contain shared token embedding tensors saved as either "encoder.embed_tokens.weight",
        # "decoder.embed_tokens.weight" or "shared.weight". Some models even store several of them in their
        # safetensors files. We use the first of these three tensors as the token embeddings for both the encoder
        # and the decoder, and ignore the remaining ones.
        if name in ["decoder.embed_tokens.weight", "encoder.embed_tokens.weight", "shared.weight"]:
            if not self.shared_token_embeddings_found:
                name = "shared.weight"
                self.shared_token_embeddings_found = True
            else:
                logger.debug(f"Skipping shared tensor {name!r} in safetensors so that convert can end normally.")
                return []

        return [(self.map_tensor_name(name), data_torch)]


@ModelBase.register("JAISLMHeadModel")
class JaisModel(TextModel):
    model_arch = gguf.MODEL_ARCH.JAIS

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # SwiGLU activation
        assert self.hparams["activation_function"] == "swiglu"
        # ALiBi position embedding
        assert self.hparams["position_embedding_type"] == "alibi"

        # Embeddings scale
        self.embeddings_scale = 1.0
        if 'mup_embeddings_scale' in self.hparams:
            self.embeddings_scale = self.hparams['mup_embeddings_scale']
        elif 'embeddings_scale' in self.hparams:
            self.embeddings_scale = self.hparams['embeddings_scale']
        else:
            assert False

        self.width_scale = 1.0
        if 'mup_output_alpha' in self.hparams:
            assert 'mup_width_scale' in self.hparams
            self.width_scale = self.hparams['mup_output_alpha'] * self.hparams['mup_width_scale']
        elif 'width_scale' in self.hparams:
            self.width_scale = self.hparams['width_scale']
        else:
            assert False

        self.max_alibi_bias = 8.0

    def set_vocab(self):
        self._set_vocab_gpt2()

    def set_gguf_parameters(self):
        self.gguf_writer.add_block_count(self.hparams["n_layer"])
        self.gguf_writer.add_context_length(self.hparams["n_positions"])
        self.gguf_writer.add_embedding_length(self.hparams["n_embd"])
        self.gguf_writer.add_feed_forward_length(self.hparams["n_inner"])
        self.gguf_writer.add_head_count(self.hparams["n_head"])
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        tensors: list[tuple[str, Tensor]] = []

        # we don't need these
        if name.endswith(".attn.bias"):
            return tensors

        if name.endswith("relative_pe.slopes"):
            # Calculate the max ALiBi bias (this is the inverse of the ALiBi slope calculation).
            # Some other models have max_alibi_bias spelled out explicitly in their hyperparams,
            # but Jais's PyTorch model simply precalculates the slope values and places them
            # in relative_pes.slopes.
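            # Worked example (illustrative numbers, not from any config): with
            # n_head = 32, n_head_closest_log2 = 32; the standard ALiBi first
            # slope is 2 ** (-max_bias / 32), so first_val = 2 ** -0.25 recovers
            # max_alibi_bias = -round(-0.25 * 32) = 8.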
            n_head_closest_log2 = 2 ** math.floor(math.log2(self.hparams["n_head"]))
            first_val = float(data_torch[0].item())
            self.max_alibi_bias = -round(math.log2(first_val) * n_head_closest_log2)

            return tensors

        if name.endswith((".c_attn.weight", ".c_proj.weight", ".c_fc.weight", ".c_fc2.weight")):
            data_torch = data_torch.transpose(1, 0)

        new_name = self.map_tensor_name(name)

        if new_name == self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD):
            tensors.append((new_name, data_torch * self.embeddings_scale))
        elif new_name == self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT):
            tensors.append((new_name, data_torch * self.width_scale))
        else:
            tensors.append((new_name, data_torch))

        return tensors

    def prepare_tensors(self):
        super().prepare_tensors()
        self.gguf_writer.add_max_alibi_bias(self.max_alibi_bias)


@ModelBase.register("Glm4ForCausalLM", "Glm4vForConditionalGeneration")
class Glm4Model(TextModel):
    model_arch = gguf.MODEL_ARCH.GLM4

    def set_vocab(self):
        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(self.dir_model, trust_remote_code=True)
        tokens, toktypes, tokpre = self.get_vocab_base()
        self.gguf_writer.add_tokenizer_model("gpt2")
        self.gguf_writer.add_tokenizer_pre(tokpre)
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)
        special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=True)
        special_vocab._set_special_token("eos", tokenizer.get_added_vocab()["<|endoftext|>"])
        special_vocab._set_special_token("eot", tokenizer.get_added_vocab()["<|user|>"])
        special_vocab._set_special_token("unk", tokenizer.get_added_vocab()["<|endoftext|>"])
        special_vocab._set_special_token("bos", tokenizer.get_added_vocab()["<|endoftext|>"])
        special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        if (rope_dim := self.hparams.get("head_dim")) is None:
            rope_dim = self.hparams["hidden_size"] // self.hparams["num_attention_heads"]
        self.gguf_writer.add_rope_dimension_count(int(rope_dim * self.hparams.get("partial_rotary_factor", 0.5)))
        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
            self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        if name.startswith("model.visual."):  # ignore visual part of Glm4v
            return []
        elif name.startswith("model.language_model."):
            name = name.replace("language_model.", "")  # for Glm4v
        return super().modify_tensors(data_torch, name, bid)


@ModelBase.register("Glm4MoeForCausalLM")
class Glm4MoeModel(TextModel):
    model_arch = gguf.MODEL_ARCH.GLM4_MOE

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # GLM4_MOE has num_hidden_layers + num_nextn_predict_layers actual layers (including the NextN layers)
        self.block_count = self.hparams["num_hidden_layers"] + self.hparams.get("num_nextn_predict_layers", 0)
        self.tensor_map = gguf.get_tensor_name_map(self.model_arch, self.block_count)

    def set_vocab(self):
        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(self.dir_model)
        special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=True)
        tokens, toktypes, tokpre = self.get_vocab_base()
        self.gguf_writer.add_tokenizer_model("gpt2")
        self.gguf_writer.add_tokenizer_pre(tokpre)
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)

        # Special tokens
        # Note: Using <|endoftext|> (151329) for eot causes endless generation
        special_vocab._set_special_token("bos", tokenizer.get_added_vocab()["[gMASK]"])          # 151331
        special_vocab._set_special_token("eot", tokenizer.get_added_vocab()["<|user|>"])         # 151336
        special_vocab._set_special_token("unk", tokenizer.get_added_vocab()["<|endoftext|>"])    # 151329
        special_vocab._set_special_token("eom", tokenizer.get_added_vocab()["<|observation|>"])  # 151338

        # Patch broken chat template
        if isinstance(special_vocab.chat_template, str) and "visible_text(m.content).endswith" in special_vocab.chat_template:
            special_vocab.chat_template = special_vocab.chat_template.replace(
                """{{ visible_text(m.content) }}\n{{- '/nothink' if (enable_thinking is defined and not enable_thinking and not visible_text(m.content).endswith("/nothink")) else '' -}}""",
                """{% set content = visible_text(m.content) %}{{ content }}\n{{- '/nothink' if (enable_thinking is defined and not enable_thinking and not content.endswith("/nothink")) else '' -}}""")

        special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        if (rope_dim := self.hparams.get("head_dim")) is None:
            rope_dim = self.hparams["hidden_size"] // self.hparams["num_attention_heads"]
        self.gguf_writer.add_rope_dimension_count(int(rope_dim * self.hparams.get("partial_rotary_factor", 0.5)))

        # MoE parameters - Use only the routed expert count (shared experts are handled separately)
        if (n_routed_experts := self.hparams.get("n_routed_experts")) is not None:
            self.gguf_writer.add_expert_count(n_routed_experts)
        if (moe_intermediate_size := self.hparams.get("moe_intermediate_size")) is not None:
            self.gguf_writer.add_expert_feed_forward_length(moe_intermediate_size)
        if (n_shared_experts := self.hparams.get("n_shared_experts")) is not None:
            self.gguf_writer.add_expert_shared_count(n_shared_experts)
        if (first_k_dense_replace := self.hparams.get("first_k_dense_replace")) is not None:
            self.gguf_writer.add_leading_dense_block_count(first_k_dense_replace)

        # Expert gating function (sigmoid for GLM4_MOE)
        self.gguf_writer.add_expert_gating_func(gguf.ExpertGatingFuncType.SIGMOID)

        # Routed scaling factor
        if (routed_scaling_factor := self.hparams.get("routed_scaling_factor")) is not None:
            self.gguf_writer.add_expert_weights_scale(routed_scaling_factor)

        # Normalise topk probabilities
        if (norm_topk_prob := self.hparams.get("norm_topk_prob")) is not None:
            self.gguf_writer.add_expert_weights_norm(norm_topk_prob)

        # NextN/MTP prediction layers
        if (num_nextn_predict_layers := self.hparams.get("num_nextn_predict_layers")) is not None:
            self.gguf_writer.add_nextn_predict_layers(num_nextn_predict_layers)

    _experts: list[dict[str, Tensor]] | None = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        if name.startswith("model.visual."):  # ignore visual part
            return []
        elif name.startswith("model.language_model."):
            name = name.replace("language_model.", "")  # for multimodal variants

        # Handle main token embedding (but not layer-specific NextN embeddings)
        if name == "model.embed_tokens.weight" and ".layers." not in name:
            return [(self.map_tensor_name("token_embd.weight"), data_torch)]

        # Handle routed experts
        if name.find("mlp.experts") != -1:
            n_experts = self.hparams["n_routed_experts"]
            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
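                # Shape sketch (illustrative sizes): with n_experts = 128,
                # stacking the 128 per-expert gate_proj weights of shape
                # (moe_intermediate_size, hidden_size) yields one tensor of shape
                # (128, moe_intermediate_size, hidden_size); down_proj is handled
                # the same way with its transposed per-expert shape.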
                for w_name in ["down_proj", "gate_proj", "up_proj"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)
                    merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"
                    new_name = self.map_tensor_name(merged_name)
                    tensors.append((new_name, data_torch))
                return tensors
            else:
                return []

        if name.endswith("e_score_correction_bias"):
            name = name.replace("e_score_correction_bias", "e_score_correction.bias")

        new_name = self.map_tensor_name(name)
        return [(new_name, data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()
        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")


@ModelBase.register("GlmForCausalLM", "ChatGLMModel", "ChatGLMForConditionalGeneration")
class ChatGLMModel(TextModel):
    model_arch = gguf.MODEL_ARCH.CHATGLM

    def set_vocab_chatglm3(self):
        dir_model = self.dir_model
        hparams = self.hparams
        tokens: list[bytes] = []
        toktypes: list[int] = []
        scores: list[float] = []

        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(dir_model, trust_remote_code=True)
        vocab_size = hparams.get("padded_vocab_size", len(tokenizer.get_vocab()))
        assert max(tokenizer.get_vocab().values()) < vocab_size
        role_special_tokens = ["<|system|>", "<|user|>", "<|assistant|>", "<|observation|>"]
        special_tokens = ["[MASK]", "[gMASK]", "[sMASK]", "sop", "eop"] + role_special_tokens
        for token_id in range(vocab_size):
            piece = tokenizer._convert_id_to_token(token_id)
            if token_id == 0:
                piece = "<unk>"
            elif token_id == 1:
                piece = "<bos>"
            elif token_id == 2:
                piece = "<eos>"

            text = piece.encode("utf-8")
            score = 0.0
            # Referencing the tokenizer Python implementation (https://huggingface.co/THUDM/chatglm3-6b/blob/main/tokenization_chatglm.py),
            # a score is only valid if the token id is less than tokenizer.tokenizer.sp_model.vocab_size()
            if len(piece) != 0 and token_id < tokenizer.tokenizer.sp_model.vocab_size():
                score = tokenizer.tokenizer.sp_model.get_score(token_id)

            if token_id >= tokenizer.tokenizer.sp_model.vocab_size():
                if piece in special_tokens:
                    toktype = SentencePieceTokenTypes.CONTROL
                elif len(piece) == 0:
                    text = f"[PAD{token_id}]".encode("utf-8")
                    toktype = SentencePieceTokenTypes.UNUSED
                else:
                    toktype = SentencePieceTokenTypes.USER_DEFINED
                tokens.append(text)
                scores.append(score)
                toktypes.append(toktype)
                continue

            toktype = SentencePieceTokenTypes.NORMAL
            if tokenizer.tokenizer.sp_model.is_unknown(token_id):
                toktype = SentencePieceTokenTypes.UNKNOWN
            elif tokenizer.tokenizer.sp_model.is_control(token_id):
                toktype = SentencePieceTokenTypes.CONTROL
            elif tokenizer.tokenizer.sp_model.is_unused(token_id):
                toktype = SentencePieceTokenTypes.UNUSED
            elif tokenizer.tokenizer.sp_model.is_byte(token_id):
                toktype = SentencePieceTokenTypes.BYTE

            tokens.append(text)
            scores.append(score)
            toktypes.append(toktype)

        self.gguf_writer.add_tokenizer_model("llama")
        # glm3 needs prefix and suffix formatted as:
        # prompt = "[gMASK]sop<|user|>\n" + prompt + "<|assistant|>"
        self.gguf_writer.add_tokenizer_pre("chatglm-spm")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

    @staticmethod
    def token_bytes_to_string(b):
        from transformers.models.gpt2.tokenization_gpt2 import bytes_to_unicode
        byte_encoder = bytes_to_unicode()
        return ''.join([byte_encoder[ord(char)] for char in b.decode('latin-1')])

    @staticmethod
    def bpe(mergeable_ranks: dict[bytes, int], token: bytes, max_rank: int | None = None) -> list[bytes]:
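        # Illustrative example (hypothetical merge table, not a real tokenizer):
        # with mergeable_ranks = {b"ab": 0, b"abc": 1},
        #   bpe(ranks, b"abc")             -> [b"abc"]       (all merges applied)
        #   bpe(ranks, b"abc", max_rank=1) -> [b"ab", b"c"]  (merges with rank >= 1 are skipped)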
        parts = [bytes([b]) for b in token]
        while True:
            min_idx = None
            min_rank = None
            for i, pair in enumerate(zip(parts[:-1], parts[1:])):
                rank = mergeable_ranks.get(pair[0] + pair[1])
                if rank is not None and (min_rank is None or rank < min_rank):
                    min_idx = i
                    min_rank = rank
            if min_rank is None or (max_rank is not None and min_rank >= max_rank):
                break
            assert min_idx is not None
            parts = parts[:min_idx] + [parts[min_idx] + parts[min_idx + 1]] + parts[min_idx + 2:]
        return parts

    def set_vocab(self):
        if "THUDM/chatglm3-6b" in self.hparams.get("_name_or_path", ""):
            self.set_vocab_chatglm3()
            return

        dir_model = self.dir_model
        hparams = self.hparams
        tokens: list[str] = []
        toktypes: list[int] = []

        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(dir_model, trust_remote_code=True)
        vocab_size = hparams.get("padded_vocab_size", hparams["vocab_size"])
        assert max(tokenizer.get_vocab().values()) < vocab_size

        tokens, toktypes, tokpre = self.get_vocab_base()
        self.gguf_writer.add_tokenizer_model("gpt2")
        self.gguf_writer.add_tokenizer_pre(tokpre)
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)
        special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=True)
        # only add special tokens when they were not already loaded from config.json
        special_vocab._set_special_token("eos", tokenizer.get_added_vocab()["<|endoftext|>"])
        special_vocab._set_special_token("eot", tokenizer.get_added_vocab()["<|user|>"])
        # this one is usually not in config.json anyway
        special_vocab._set_special_token("unk", tokenizer.get_added_vocab()["<|endoftext|>"])
        special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed"))
        n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads"))
        n_head_kv = self.hparams.get("multi_query_group_num", self.hparams.get("num_key_value_heads", n_head))
        self.gguf_writer.add_context_length(self.hparams.get("seq_length", n_embed))
        self.gguf_writer.add_embedding_length(n_embed)
        self.gguf_writer.add_feed_forward_length(self.hparams.get("ffn_hidden_size", self.hparams.get("intermediate_size", 4 * n_embed)))
        self.gguf_writer.add_block_count(self.hparams.get("num_layers", self.hparams["num_hidden_layers"]))
        self.gguf_writer.add_head_count(n_head)
        self.gguf_writer.add_head_count_kv(n_head_kv)
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams.get("layernorm_epsilon", 1e-5))
        self.gguf_writer.add_file_type(self.ftype)
        if "attention_dim" in self.hparams:
            rope_dim = self.hparams["attention_dim"]
        else:
            rope_dim = self.hparams["hidden_size"] // self.hparams["num_attention_heads"]
        self.gguf_writer.add_rope_dimension_count(int(rope_dim * self.hparams.get("partial_rotary_factor", 0.5)))
        self.gguf_writer.add_add_bos_token(False)
        rope_freq = 10000
        if "rope_ratio" in self.hparams:
            rope_freq = rope_freq * self.hparams["rope_ratio"]
        self.gguf_writer.add_rope_freq_base(rope_freq)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused
        if name.endswith(".rotary_pos_emb.inv_freq") or name.startswith("model.vision."):
            return []
        name = name.removeprefix("transformer.")
        return [(self.map_tensor_name(name), data_torch)]


@ModelBase.register("NemotronForCausalLM")
class NemotronModel(TextModel):
    model_arch = gguf.MODEL_ARCH.NEMOTRON

    def set_vocab(self):
        self._set_vocab_sentencepiece()
        self.gguf_writer.add_pad_token_id(0)
        self.gguf_writer.add_unk_token_id(1)

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        hparams = self.hparams
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])

        f_norm_eps = self.find_hparam(["layer_norm_eps", "layer_norm_epsilon", "norm_epsilon", "norm_eps"])
        self.gguf_writer.add_layer_norm_eps(f_norm_eps)

        # * Partial RoPE
        rot_pct = self.find_hparam(["partial_rotary_factor", "rope_pct", "rope_percent"])
        n_embd = self.find_hparam(["hidden_size", "n_embd"])
        n_head = self.find_hparam(["num_attention_heads", "n_head"])
        self.gguf_writer.add_rope_dimension_count(int(rot_pct * n_embd) // n_head)

        # * RopeScaling for Nemotron
        if "rope_scaling" not in self.hparams or self.hparams["rope_scaling"] is None:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)
        else:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
            self.gguf_writer.add_rope_scaling_factor(self.hparams["factor"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # * Adding +1 to LayerNorm's weights here to implement layernorm1p w/o changing anything on the GGML engine side:
        #   model.layers.{l}.input_layernorm.weight
        #   model.layers.{l}.post_attention_layernorm.weight
        #   model.norm.weight
        if name.endswith("norm.weight"):
            data_torch = data_torch + 1
        return [(self.map_tensor_name(name), data_torch)]


@ModelBase.register("ExaoneForCausalLM")
class ExaoneModel(TextModel):
    model_arch = gguf.MODEL_ARCH.EXAONE

    def set_gguf_parameters(self):
        hparams = self.hparams
        assert hparams["activation_function"] == "silu"

        max_position_embeddings = hparams["max_position_embeddings"]
        embed_dim = hparams["hidden_size"]
        num_heads = hparams["num_attention_heads"]
        num_kv_heads = hparams.get("num_key_value_heads", num_heads)
        layer_norm_eps = hparams["layer_norm_epsilon"]
        intermediate_size = hparams["intermediate_size"] if "intermediate_size" in hparams else 4 * embed_dim
        num_layers = hparams["num_layers"]
        # ignore for now, as EXAONE-3.0-7.8B-Instruct attention_dropout is 0.0
        # attention_dropout_rate = hparams["attention_dropout"]
        # ignore for now, as EXAONE-3.0-7.8B-Instruct embed_dropout is 0.0
        # embed_dropout_rate = hparams["embed_dropout"]
        self.gguf_writer.add_embedding_length(embed_dim)
        self.gguf_writer.add_head_count(num_heads)
        self.gguf_writer.add_head_count_kv(num_kv_heads)
        self.gguf_writer.add_context_length(max_position_embeddings)
        self.gguf_writer.add_layer_norm_rms_eps(layer_norm_eps)
        self.gguf_writer.add_feed_forward_length(intermediate_size)
        self.gguf_writer.add_block_count(num_layers)
        self.gguf_writer.add_file_type(self.ftype)

        if (rope_theta := self.hparams.get("rope_theta")) is not None:
            self.gguf_writer.add_rope_freq_base(rope_theta)
        rotary_factor = self.find_hparam(["partial_rotary_factor", "rope_pct"], optional=True)
        rotary_factor = rotary_factor if rotary_factor is not None else 1.0
        self.gguf_writer.add_rope_dimension_count(int(rotary_factor * (hparams["hidden_size"] // hparams["num_attention_heads"])))
        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "linear" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])

    def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
        if rope_scaling := self.find_hparam(["rope_scaling"], optional=True):
            if rope_scaling.get("rope_type", '').lower() == "llama3":
                base = self.hparams.get("rope_theta", 10000.0)
                if (dim := self.hparams.get("head_dim")) is None:
                    dim = self.hparams["hidden_size"] // self.hparams["num_attention_heads"]
                freqs = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim))

                factor = rope_scaling.get("factor", 8.0)
                low_freq_factor = rope_scaling.get("low_freq_factor", 1.0)
                high_freq_factor = rope_scaling.get("high_freq_factor", 4.0)
                old_context_len = self.hparams.get("original_max_position_embeddings", 8192)

                low_freq_wavelen = old_context_len / low_freq_factor
                high_freq_wavelen = old_context_len / high_freq_factor
                assert low_freq_wavelen != high_freq_wavelen
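                # Worked example (illustrative numbers): with factor = 8,
                # low_freq_factor = 1, high_freq_factor = 4 and old_context_len = 8192,
                # low_freq_wavelen = 8192 and high_freq_wavelen = 2048. A frequency
                # with wavelen = 4096 falls in between, so
                # smooth = (8192 / 4096 - 1) / (4 - 1) = 1/3 and its rope factor is
                # 1 / ((2/3) / 8 + 1/3) = 2.4; shorter wavelengths keep factor 1,
                # longer ones get the full factor of 8.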
                rope_factors = []
                for freq in freqs:
                    wavelen = 2 * math.pi / freq
                    if wavelen < high_freq_wavelen:
                        rope_factors.append(1)
                    elif wavelen > low_freq_wavelen:
                        rope_factors.append(factor)
                    else:
                        smooth = (old_context_len / wavelen - low_freq_factor) / (high_freq_factor - low_freq_factor)
                        rope_factors.append(1 / ((1 - smooth) / factor + smooth))

                yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FREQS), torch.tensor(rope_factors, dtype=torch.float32))


@ModelBase.register("Exaone4ForCausalLM")
class Exaone4Model(TextModel):
    model_arch = gguf.MODEL_ARCH.EXAONE4

    def set_vocab(self):
        tokens, toktypes, tokpre = self.get_vocab_base()
        self.gguf_writer.add_tokenizer_model("gpt2")
        self.gguf_writer.add_tokenizer_pre(tokpre)
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=True)
        special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        hparams = self.hparams
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])

        if hparams.get("sliding_window") is not None:
            self.gguf_writer.add_sliding_window(hparams["sliding_window"])
            if "layer_types" in hparams:
                self.gguf_writer.add_sliding_window_pattern([t == "sliding_attention" for t in hparams["layer_types"]])
            elif "sliding_window_pattern" in hparams:
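                # Worked example (illustrative): the string pattern "LLLG" over
                # 8 layers yields [True, True, True, False, True, True, True, False];
                # the integer pattern 4 produces the same mask (every 4th layer
                # is global, the rest use the sliding window).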
                sliding_window_pattern = []
                if isinstance(hparams["sliding_window_pattern"], str):  # e.g. LLLG
                    for i in range(hparams["num_hidden_layers"]):
                        sliding_window_pattern.append(hparams["sliding_window_pattern"][i % len(hparams["sliding_window_pattern"])] == "L")
                if isinstance(hparams["sliding_window_pattern"], int):  # e.g. 4
                    for i in range(hparams["num_hidden_layers"]):
                        sliding_window_pattern.append((i + 1) % hparams["sliding_window_pattern"] != 0)
                if len(sliding_window_pattern) == hparams["num_hidden_layers"]:
                    self.gguf_writer.add_sliding_window_pattern(sliding_window_pattern)

        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "linear" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])

    def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
        if rope_scaling := self.find_hparam(["rope_scaling"], optional=True):
            if rope_scaling.get("rope_type", '').lower() == "llama3":
                base = self.hparams.get("rope_theta", 10_000.0)
                if (dim := self.hparams.get("head_dim")) is None:
                    dim = self.hparams["hidden_size"] // self.hparams["num_attention_heads"]
                freqs = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim))

                factor = rope_scaling.get("factor", 16.0)
                low_freq_factor = rope_scaling.get("low_freq_factor", 1.0)
                high_freq_factor = rope_scaling.get("high_freq_factor", 4.0)
                old_context_len = self.hparams.get("original_max_position_embeddings", 8192)

                low_freq_wavelen = old_context_len / low_freq_factor
                high_freq_wavelen = old_context_len / high_freq_factor

                rope_factors = []
                for freq in freqs:
                    wavelen = 2 * math.pi / freq
                    if wavelen < high_freq_wavelen:
                        rope_factors.append(1)
                    elif wavelen > low_freq_wavelen:
                        rope_factors.append(factor)
                    else:
                        smooth = (old_context_len / wavelen - low_freq_factor) / (high_freq_factor - low_freq_factor)
                        rope_factors.append(1 / ((1 - smooth) / factor + smooth))

                yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FREQS), torch.tensor(rope_factors, dtype=torch.float32))


@ModelBase.register("GraniteForCausalLM")
class GraniteModel(LlamaModel):
    """Conversion for IBM's GraniteForCausalLM"""
    model_arch = gguf.MODEL_ARCH.GRANITE

    def set_gguf_parameters(self):
        """Granite uses standard llama parameters with the following differences:
        - No head_dim support
        - New multiplier params:
          - attention_scale
          - embedding_scale
          - residual_scale
          - logits_scaling
        """
        if head_dim := self.hparams.pop("head_dim", None):
            logger.warning("Ignoring head_dim (%s) from config for Granite", head_dim)
        super().set_gguf_parameters()
        # NOTE: Convert _multiplier params to _scale params for naming
        #   consistency
        if attention_scale := self.hparams.get("attention_multiplier"):
            self.gguf_writer.add_attention_scale(attention_scale)
            logger.info("gguf: (granite) attention_scale = %s", attention_scale)
        if embedding_scale := self.hparams.get("embedding_multiplier"):
            self.gguf_writer.add_embedding_scale(embedding_scale)
            logger.info("gguf: (granite) embedding_scale = %s", embedding_scale)
        if residual_scale := self.hparams.get("residual_multiplier"):
            self.gguf_writer.add_residual_scale(residual_scale)
            logger.info("gguf: (granite) residual_scale = %s", residual_scale)
        if logits_scale := self.hparams.get("logits_scaling"):
            self.gguf_writer.add_logit_scale(logits_scale)
            logger.info("gguf: (granite) logits_scale = %s", logits_scale)


@ModelBase.register("GraniteMoeForCausalLM", "GraniteMoeSharedForCausalLM")
class GraniteMoeModel(GraniteModel):
    """Conversion for IBM's GraniteMoeForCausalLM"""
    model_arch = gguf.MODEL_ARCH.GRANITE_MOE

    def set_gguf_parameters(self):
        """GraniteMoeShared uses GraniteMoe parameters plus the following:
        - shared_intermediate_size
        """
        super().set_gguf_parameters()
        if shared_feed_forward_length := self.hparams.get("shared_intermediate_size"):
            self.gguf_writer.add_expert_shared_feed_forward_length(shared_feed_forward_length)
            logger.info("gguf: (granitemoeshared) shared_feed_forward_length = %s", shared_feed_forward_length)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        """In modeling_granitemoe, the JetMoe implementation of parallel experts
        is used. This essentially merges w1 and w3 into a single tensor with 2x
        the hidden size that is then split during forward. To keep compatibility
        with existing mixtral support, we pull them apart here.
        """
        if name.endswith("block_sparse_moe.input_linear.weight"):
            ffn_dim = self.hparams["intermediate_size"]
            assert data_torch.shape[-2] == 2 * ffn_dim, "Merged FFN tensor size must be 2 * intermediate_size"
            gate, up = data_torch.split(ffn_dim, dim=-2)
            return [
                (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE_EXP, bid), gate),
                (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP_EXP, bid), up),
            ]

        has_experts = bool(self.hparams.get('num_local_experts'))

        if name.endswith("shared_mlp.input_linear.weight"):
            ffn_dim = self.hparams["shared_intermediate_size"]
            assert data_torch.shape[-2] == 2 * ffn_dim, "Merged FFN tensor size must be 2 * shared_intermediate_size"
            gate, up = data_torch.split(ffn_dim, dim=-2)
            if has_experts:
                return [
                    (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE_SHEXP, bid), gate),
                    (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP_SHEXP, bid), up),
                ]
            return [
                (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE, bid), gate),
                (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP, bid), up),
            ]

        if not has_experts and name.endswith("shared_mlp.output_linear.weight"):
            return [
                (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_DOWN, bid), data_torch)
            ]

        return super().modify_tensors(data_torch, name, bid)


@ModelBase.register("GraniteMoeHybridForCausalLM", "BambaForCausalLM")
class GraniteHybridModel(Mamba2Model, GraniteMoeModel):
    """GraniteHybrid is a hybrid SSM + Attention model that uses Mamba2 SSM
    layers and optionally uses MoE w/ a shared expert"""
    model_arch = gguf.MODEL_ARCH.GRANITE_HYBRID
    undo_permute = True

    def __init__(self, *args, **kwargs):
        # Hybrid mamba models use a prefix for the mamba-specific params.
        # TODO: Extend this if the prefix(es) need to be configurable
        self.hparam_prefixes = ["mamba"]
        super().__init__(*args, **kwargs)

        # Lists of which layers use ssm vs attention
        self._attn_layers = self.get_attn_layers()
        self._ssm_layers = [
            i for i in range(self.block_count)
            if i not in self._attn_layers
        ]

        # n_group and d_inner are used during reshape_tensors for mamba2.
        # NOTE: Explicitly include the hparam prefix for d_model to
        #   disambiguate it from the top-level head_dim.
        # NOTE 2: If needed for future models, this can be isolated in a method
        #   to separate the prefix setting and the keys used.
        self.d_model = self.find_hparam([f"{self.hparam_prefixes[0]}_head_dim", "hidden_size", "d_model"])
        self.n_group = self.find_hparam(["n_groups", "num_groups"])
        self.d_inner = self.find_hparam(["expand", "num_heads"]) * self.d_model

    def get_attn_layers(self):
        # Explicit list of layer type names
        if layer_types := self.hparams.get("layer_types"):
            return [
                i for i, typ in enumerate(layer_types)
                if typ == "attention"
            ]

        # Layer types indicated by index or period
        attn_layers = self.hparams.get("attn_layer_indices", [])
        if not attn_layers:
            attn_period = self.hparams.get("attn_layer_period")
            assert attn_period, "Didn't find attn_layer_indices or attn_layer_period"
            attn_offset = self.hparams.get("attn_layer_offset")
            assert attn_offset is not None, "No attention layer offset set with attn_layer_period"
            attn_layers = [
                i for i in range(self.block_count)
                if i % attn_period == attn_offset
            ]
        return attn_layers

    def find_hparam(self, keys: Iterable[str], *args, **kwargs) -> Any:
        prefixed = []
        for pfx in self.hparam_prefixes:
            prefixed.extend(
                "_".join([pfx, k])
                for k in keys
            )
        keys = list(keys) + prefixed
        return Mamba2Model.find_hparam(self, keys, *args, **kwargs)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        if (
            name.endswith("block_sparse_moe.input_linear.weight")
            or "shared_mlp" in name
        ):
            return GraniteMoeModel.modify_tensors(self, data_torch, name, bid)

        # Determine whether this is a mamba layer or an attention layer
        if bid in self._ssm_layers:
            return Mamba2Model.modify_tensors(self, data_torch, name, bid)
        elif bid in self._attn_layers:
            return GraniteMoeModel.modify_tensors(self, data_torch, name, bid)
        return [(self.map_tensor_name(name), data_torch)]

    def set_gguf_parameters(self):
        """This method merges params from both parents and some that are
        specific to this model. The result is some duplication of how the params
        get set. The following warnings are expected during conversion:

        WARNING:Duplicated key name 'granitehybrid.attention.head_count_kv'
        WARNING:Duplicated key name 'granitehybrid.context_length'
        """
        GraniteMoeModel.set_gguf_parameters(self)

        ## Mamba mixer params ##
        self.gguf_writer.add_ssm_conv_kernel(self.find_hparam(["conv_kernel", "d_conv"]))
        self.gguf_writer.add_ssm_state_size(self.find_hparam(["state_size", "d_state", "state_dim", "ssm_state_size"]))
        self.gguf_writer.add_ssm_group_count(self.n_group)
        self.gguf_writer.add_ssm_inner_size(self.d_inner)
        # NOTE: The mamba_dt_rank is _not_ the right field for how this is used
        #   in llama.cpp
        self.gguf_writer.add_ssm_time_step_rank(self.find_hparam(["n_heads", "num_heads"]))

        ## Attention params ##
        head_count_kv = self.find_hparam(["num_key_value_heads", "n_head_kv"])
        head_count_kv_vec = [
            head_count_kv if i in self._attn_layers else 0 for i in range(self.block_count)
        ]
        if rope_dim := self.hparams.get("attn_rotary_emb"):
            self.gguf_writer.add_rope_dimension_count(rope_dim)
        self.gguf_writer.add_head_count_kv(head_count_kv_vec)

        ## If Bamba, use rope, otherwise don't
        use_rope = "BambaForCausalLM" in self.hparams["architectures"]
        self.gguf_writer.add_rope_scaling_finetuned(use_rope)
        if not use_rope:
            self.gguf_writer.add_context_length(2**20)

        ## Validation ##
        d_head = self.find_hparam(["d_head"], optional=True) or 64
        assert self.hparams.get("hidden_act") in [None, "silu"], "Only SILU activation supported"
        assert self.d_inner % d_head == 0, f"SSM inner size {self.d_inner} not a multiple of head dim {d_head}"

    def set_vocab(self):
        self.hparams["pad_vocab_size_multiple"] = 8
        Mamba2Model.set_vocab(self)


@ModelBase.register("NemotronHForCausalLM")
class NemotronHModel(GraniteHybridModel):
    """Hybrid mamba2/attention model from NVIDIA"""
    model_arch = gguf.MODEL_ARCH.NEMOTRON_H

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Save the top-level head_dim for later
        self.head_dim = self.hparams.get("head_dim", self.hparams.get("attention_head_dim"))
        assert self.head_dim is not None, "Could not find the attention head dim in config"

        # Don't use expand to calculate d_inner
        self.d_inner = self.find_hparam(["num_heads"]) * self.d_model

        # Update the ssm / attn / mlp layers
        # M: Mamba2, *: Attention, -: MLP
        hybrid_override_pattern = self.hparams["hybrid_override_pattern"]
        self._ssm_layers = [i for i, val in enumerate(hybrid_override_pattern) if val == "M"]
        self._mlp_layers = [i for i, val in enumerate(hybrid_override_pattern) if val == "-"]

    def get_attn_layers(self):
        hybrid_override_pattern = self.hparams["hybrid_override_pattern"]
        assert len(hybrid_override_pattern) == self.block_count, "Mismatch between hybrid override and num_hidden_layers!"
        return [i for i, val in enumerate(hybrid_override_pattern) if val == "*"]

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_key_length(self.head_dim)
        self.gguf_writer.add_value_length(self.head_dim)

        # Set feed_forward_length
        # NOTE: This will trigger an override warning. This is preferable to
        #   duplicating all the parent logic
        n_ff = self.find_hparam(["intermediate_size", "n_inner", "hidden_dim"])
        self.gguf_writer.add_feed_forward_length([
            n_ff if i in self._mlp_layers else 0 for i in range(self.block_count)
        ])

    def set_vocab(self):
        super().set_vocab()

        # The tokenizer _does_ add a BOS token (via post_processor type
        # TemplateProcessing) but does not set add_bos_token to true in the
        # config, so we need to explicitly override it here.
        self.gguf_writer.add_add_bos_token(True)


@ModelBase.register("BailingMoeForCausalLM")
class BailingMoeModel(TextModel):
    model_arch = gguf.MODEL_ARCH.BAILINGMOE

    def set_vocab(self):
        self._set_vocab_gpt2()

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        hparams = self.hparams
        if (rope_dim := hparams.get("head_dim")) is None:
            rope_dim = hparams["hidden_size"] // hparams["num_attention_heads"]

        self.gguf_writer.add_rope_dimension_count(rope_dim)
        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
            self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"])
        else:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)
        self.gguf_writer.add_leading_dense_block_count(hparams["first_k_dense_replace"])
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])
        self.gguf_writer.add_expert_feed_forward_length(hparams["moe_intermediate_size"])
        self.gguf_writer.add_expert_weights_scale(1.0)
        self.gguf_writer.add_expert_count(hparams["num_experts"])
        self.gguf_writer.add_expert_shared_count(hparams["num_shared_experts"])
        self.gguf_writer.add_expert_weights_norm(hparams["norm_topk_prob"])

    _experts: list[dict[str, Tensor]] | None = None

    @staticmethod
    def permute(weights: Tensor, n_head: int, n_head_kv: int | None):
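        # Rotary Q/K row reordering for GGUF (same interleaving scheme as
        # LlamaModel.permute). Illustrative example: for a single head with
        # rows [r0, r1, r2, r3], reshape(1, 2, 2, ...).swapaxes(1, 2) reorders
        # them to [r0, r2, r1, r3], pairing row i with row i + head_dim // 2.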
        if n_head_kv is not None and n_head != n_head_kv:
            n_head = n_head_kv
        return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
                .swapaxes(1, 2)
                .reshape(weights.shape))

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        n_head = self.hparams["num_attention_heads"]
        n_kv_head = self.hparams.get("num_key_value_heads")
        n_embd = self.hparams["hidden_size"]
        if (head_dim := self.hparams.get("head_dim")) is None:
            head_dim = n_embd // n_head

        output_name = self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT)

        if name.endswith("attention.dense.weight"):
            return [(self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_OUT, bid), data_torch)]
        elif name.endswith("query_key_value.weight"):
            q, k, v = data_torch.split([n_head * head_dim, n_kv_head * head_dim, n_kv_head * head_dim], dim=-2)

            return [
                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid), BailingMoeModel.permute(q, n_head, n_head)),
                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid), BailingMoeModel.permute(k, n_head, n_kv_head)),
                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid), v)
            ]
        elif name.find("mlp.experts") != -1:
            n_experts = self.hparams["num_experts"]
            assert bid is not None

            tensors: list[tuple[str, Tensor]] = []

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                # merge the experts into a single 3d tensor
                for w_name in ["down_proj", "gate_proj", "up_proj"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)
                    merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"
                    new_name = self.map_tensor_name(merged_name)
                    tensors.append((new_name, data_torch))

            return tensors

        new_name = self.map_tensor_name(name)

        if new_name == output_name and self.hparams.get("norm_head"):
            data_torch = data_torch.float()
            data_torch /= torch.norm(data_torch, p=2, dim=0, keepdim=True) + 1e-7

        return [(new_name, data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")


@ModelBase.register("ChameleonForConditionalGeneration")
@ModelBase.register("ChameleonForCausalLM")  # obsolete
class ChameleonModel(TextModel):
    model_arch = gguf.MODEL_ARCH.CHAMELEON

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_swin_norm(self.hparams.get("swin_norm", False))

    def set_vocab(self):
        self._set_vocab_gpt2()

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # ignore image tokenizer for now
        # TODO: remove this once image support is implemented for Chameleon
        if name.startswith("model.vqmodel"):
            return []

        n_head = self.hparams["num_attention_heads"]
        n_kv_head = self.hparams.get("num_key_value_heads")
        hidden_dim = self.hparams.get("hidden_size")

        if name.endswith(("q_proj.weight", "q_proj.bias")):
            data_torch = LlamaModel.permute(data_torch, n_head, n_head)
        if name.endswith(("k_proj.weight", "k_proj.bias")):
            data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)
        if name.endswith(("q_norm.weight", "q_norm.bias")):
            data_torch = ChameleonModel._reverse_hf_permute(data_torch, n_head, hidden_dim)
        if name.endswith(("k_norm.weight", "k_norm.bias")):
            data_torch = ChameleonModel._reverse_hf_permute(data_torch, n_kv_head, hidden_dim)

        return [(self.map_tensor_name(name), data_torch)]

    # see: https://github.com/huggingface/transformers/blob/72fb02c47dbbe1999ae105319f24631cad6e2e00/src/transformers/models/chameleon/convert_chameleon_weights_to_hf.py#L176-L203
    @staticmethod
    def _reverse_hf_permute(data_torch, n_heads, hidden_dim):
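        # Illustrative walk-through (hypothetical sizes): with n_heads = 2 and
        # hidden_dim = 8, head_dim = 4; a norm vector [a, b, c, d] becomes
        # view(2, 2) -> [[a, b], [c, d]], which is transposed and flattened to
        # [a, c, b, d], then repeated once per head along dim 0.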
        head_dim = hidden_dim // n_heads
        data_torch = data_torch[0].view(2, head_dim // 2).t().reshape(1, -1)
        data_torch = data_torch.repeat_interleave(n_heads, 0)
        return data_torch


@ModelBase.register("UltravoxModel")
class UltravoxModel(TextModel):
    model_arch = gguf.MODEL_ARCH.LLAMA  # dummy

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        raise NotImplementedError("Ultravox does not have a text decoder. Instead, it uses Llama or other models for text. If you want to get the audio encoder, please use the --mmproj argument")


@ModelBase.register("Qwen2AudioForConditionalGeneration")
class WhisperEncoderModel(MmprojModel):
    has_vision_encoder = False  # no vision encoder
    has_audio_encoder = True

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if "hidden_size" not in self.hparams and "intermediate_size" not in self.hparams:
            self.hparams["hidden_size"] = self.hparams["d_model"]
            self.hparams["intermediate_size"] = self.hparams["encoder_ffn_dim"]
            self.hparams["num_attention_heads"] = self.hparams["encoder_attention_heads"]

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.QWEN2A)
        self.gguf_writer.add_audio_num_mel_bins(self.hparams["num_mel_bins"])
        self.gguf_writer.add_audio_attention_layernorm_eps(self.hparams.get("layer_norm_eps", 1e-5))

    def tensor_force_quant(self, name, new_name, bid, n_dims):
        if ".conv" in name and ".weight" in name:
            return gguf.GGMLQuantizationType.F16
        return super().tensor_force_quant(name, new_name, bid, n_dims)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        if name.startswith("language_model."):
            # skip language model tensors
            return []

        # prevent name clashes with vision tensors
        if name.startswith("multi_modal_projector"):
            name = "audio." + name

        if "conv1.bias" in name or "conv2.bias" in name:
            # transpose conv1 and conv2 bias
            data_torch = data_torch.unsqueeze(-1)

        return [(self.map_tensor_name(name), data_torch)]


@ModelBase.register("UltravoxModel")
class UltravoxWhisperEncoderModel(WhisperEncoderModel):
    has_vision_encoder = False  # no vision encoder
    has_audio_encoder = True

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.ULTRAVOX)
        self.gguf_writer.add_audio_stack_factor(self.global_config["stack_factor"])


@ModelBase.register("VoxtralForConditionalGeneration")
class VoxtralWhisperEncoderModel(WhisperEncoderModel):
    has_vision_encoder = False  # no vision encoder
    has_audio_encoder = True

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.VOXTRAL)
        self.gguf_writer.add_audio_stack_factor(4)  # == intermediate_size // hidden_size


@ModelBase.register("FalconH1ForCausalLM")
class FalconH1Model(Mamba2Model):
    model_arch = gguf.MODEL_ARCH.FALCON_H1

    def __init__(self, *args, **kwargs):
        # Set the hparam prefixes for Falcon Mamba2
        self.hparam_prefixes = ["mamba"]

        # Initialize the base Mamba2Model
        super().__init__(*args, **kwargs)

        # Use Llama conversion for attention
        self._transformer_model_class = LlamaModel

        # n_group and d_inner are used during reshape_tensors for mamba2
        self.n_group = self.find_hparam(["n_groups"])
        self.d_inner = self.find_hparam(["mamba_d_ssm"])
        self.d_head = self.find_hparam(["d_head"])

        # Initialize any Falcon Mamba2 specific attributes
        self.has_attention = True  # Falcon Mamba2 has attention components

        # Load Falcon-H1 multipliers from hyperparameters
        self.attention_in_multiplier = self.find_hparam(["attention_in_multiplier"], optional=True)
        self.attention_out_multiplier = self.find_hparam(["attention_out_multiplier"], optional=True)
        self.ssm_in_multiplier = self.find_hparam(["ssm_in_multiplier"], optional=True)
        self.ssm_out_multiplier = self.find_hparam(["ssm_out_multiplier"], optional=True)
        self.mlp_multipliers = self.find_hparam(["mlp_multipliers"], optional=True)
        self.ssm_multipliers = self.find_hparam(["ssm_multipliers"], optional=True)
        self.intermediate_size = self.find_hparam(["intermediate_size"])
        self.key_multiplier = self.find_hparam(["key_multiplier"], optional=True)

    def find_hparam(self, keys: Iterable[str], *args, **kwargs) -> Any:
        prefixed = []
        for pfx in self.hparam_prefixes:
            prefixed.extend(
                "_".join([pfx, k])
                for k in keys
            )
        keys = list(keys) + prefixed
        return super().find_hparam(keys, *args, **kwargs)

    def set_vocab(self):
        self._set_vocab_gpt2()
    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        tensors = list(super().modify_tensors(data_torch, name, bid))
        tensor = tensors[0][1]

        if "down_proj" in name:
            tensor = tensor * self.mlp_multipliers[1]
        elif "gate_proj" in name:
            tensor = tensor * self.mlp_multipliers[0]
        elif "k_proj" in name:
            tensor = tensor * self.key_multiplier * self.attention_in_multiplier
        elif "q_proj" in name:
            tensor = tensor * self.attention_in_multiplier
        elif "v_proj" in name:
            tensor = tensor * self.attention_in_multiplier
        elif "o_proj" in name:
            tensor = tensor * self.attention_out_multiplier
        elif "out_proj" in name:
            tensor = tensor * self.ssm_out_multiplier
        elif "in_proj" in name:
            tensor = tensor * self.ssm_in_multiplier
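            # in_proj packs the Mamba2 components [z, x, B, C, dt] along dim 0, so
            # each slice gets its own multiplier from ssm_multipliers: rows
            # [0, d_ssm) -> z, [d_ssm, 2*d_ssm) -> x, the next two
            # n_groups*d_state blocks -> B and C, and the remaining rows -> dt.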
            zxbcdt_multipliers = self.hparams["ssm_multipliers"]
            intermediate_size = self.hparams["mamba_d_ssm"]
            groups_time_state_size = self.hparams["mamba_n_groups"] * self.hparams["mamba_d_state"]
            tensor[:intermediate_size, :] *= zxbcdt_multipliers[0]
            tensor[intermediate_size:2 * intermediate_size, :] *= zxbcdt_multipliers[1]
            tensor[2 * intermediate_size:2 * intermediate_size + groups_time_state_size, :] *= zxbcdt_multipliers[2]
            tensor[2 * intermediate_size + groups_time_state_size:2 * intermediate_size + 2 * groups_time_state_size, :] *= zxbcdt_multipliers[3]
            tensor[2 * intermediate_size + 2 * groups_time_state_size:, :] *= zxbcdt_multipliers[4]
        elif "lm_head" in name:
            tensor = tensor * self.hparams["lm_head_multiplier"]
        elif "embed_tokens" in name:
            tensor = tensor * self.hparams["embedding_multiplier"]
        elif "mamba.norm" in name:
            tensor = tensor.reshape(self.n_group, self.d_inner // self.n_group)

        tensors = [(tensors[0][0], tensor)]
        return tensors
    def set_gguf_parameters(self):
        super().set_gguf_parameters()

        ## General Params ##
        self.gguf_writer.add_vocab_size(self.hparams["vocab_size"])
        # Override some Mamba2 defaults
        self.gguf_writer.add_block_count(self.block_count)
        self.gguf_writer.add_context_length(self.hparams.get("max_position_embeddings", 0))
        self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])

        ## Attention params ##
        self.gguf_writer.add_head_count(self.hparams["num_attention_heads"])  # Override value 0 from Mamba2
        self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"])
        self.gguf_writer.add_key_length(self.hparams["head_dim"])
        self.gguf_writer.add_value_length(self.hparams["head_dim"])

        ## Validation ##
        assert self.hparams.get("hidden_act") in [None, "silu"], "Only SILU activation supported"
        assert self.d_inner % self.d_head == 0, f"SSM inner size {self.d_inner} not a multiple of head dim {self.d_head}"

        # Add any other Falcon Mamba2 specific configuration
        self.gguf_writer.add_rope_freq_base(self.find_hparam(["rope_theta"]))


@ModelBase.register("HunYuanMoEV1ForCausalLM")
class HunYuanMoEModel(TextModel):
    model_arch = gguf.MODEL_ARCH.HUNYUAN_MOE

    def set_vocab(self):
        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(self.dir_model, trust_remote_code=True)

        # 1. Get the pre-tokenizer identifier hash
        tokpre = self.get_vocab_base_pre(tokenizer)

        # 2. Reverse-engineer the merges list from mergeable_ranks
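        #    tiktoken-style tokenizers only ship mergeable_ranks (token -> rank);
        #    re-splitting every multi-byte token with QwenModel.bpe() capped at its
        #    own rank recovers the pair of sub-tokens it was merged from.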
        merges = []
        vocab = {}
        mergeable_ranks = tokenizer.mergeable_ranks
        for token, rank in mergeable_ranks.items():
            vocab[QwenModel.token_bytes_to_string(token)] = rank
            if len(token) == 1:
                continue
            merged = QwenModel.bpe(mergeable_ranks, token, max_rank=rank)
            if len(merged) == 2:  # TODO: this is an assert in Qwen, why?
                merges.append(' '.join(map(QwenModel.token_bytes_to_string, merged)))

        # 3. Generate the tokens and toktypes lists
        vocab_size = self.hparams["vocab_size"]
        assert tokenizer.vocab_size == vocab_size
        special_tokens = tokenizer.special_tokens
        reverse_vocab = {id_: encoded_tok for encoded_tok, id_ in {**vocab, **special_tokens}.items()}
        tokens: list[str] = []
        toktypes: list[int] = []
        for i in range(vocab_size):
            if i not in reverse_vocab:
                tokens.append(f"[PAD{i}]")
                toktypes.append(gguf.TokenType.UNUSED)
            else:
                token = reverse_vocab[i]
                tokens.append(token)
                if i in special_tokens.values():
                    toktypes.append(gguf.TokenType.CONTROL)
                else:
                    toktypes.append(gguf.TokenType.NORMAL)

        # 4. Write all vocab-related fields to the GGUF writer
        self.gguf_writer.add_tokenizer_model("gpt2")
        self.gguf_writer.add_tokenizer_pre(tokpre)
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)
        self.gguf_writer.add_token_merges(merges)

        # 5. Add special tokens and chat templates
        special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=False)
        special_vocab.add_to_gguf(self.gguf_writer)
        # FIX for BOS token: Overwrite incorrect id read from config.json
        self.gguf_writer.add_bos_token_id(127959)  # <|bos|>

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        hparams = self.hparams

        self.gguf_writer.add_expert_count(hparams["num_experts"])
        self.gguf_writer.add_expert_shared_feed_forward_length(hparams["intermediate_size"])

        moe_intermediate_size = hparams["moe_intermediate_size"]
        assert all(n == moe_intermediate_size[0] for n in moe_intermediate_size)
        self.gguf_writer.add_expert_feed_forward_length(moe_intermediate_size[0])

        moe_topk = hparams["moe_topk"]
        assert all(topk == moe_topk[0] for topk in moe_topk)
        self.gguf_writer.add_expert_used_count(moe_topk[0])

        moe_shared_expert = hparams["num_shared_expert"]
        assert all(n == moe_shared_expert[0] for n in moe_shared_expert)
        self.gguf_writer.add_expert_shared_count(moe_shared_expert[0])

        # Rope
        rope_scaling = hparams.get("rope_scaling", {})
        if rope_scaling.get("type") == "dynamic":
            # HunYuan uses NTK Aware Alpha based scaling. Original implementation: https://www.reddit.com/r/LocalLLaMA/comments/14lz7j5/ntkaware_scaled_rope_allows_llama_models_to_have/
            # 1000 corresponds to a usable context length of 256k (https://github.com/Tencent-Hunyuan/Hunyuan-A13B/blob/main/report/Hunyuan_A13B_Technical_Report.pdf)
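            # NTK-aware scaling: multiplying the base by alpha**(dim / (dim - 2))
            # stretches the wavelength of the lowest RoPE frequency by roughly a
            # factor of alpha while leaving the highest frequencies nearly unchanged.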
            alpha = rope_scaling.get("alpha", 1000)
            base = hparams.get("rope_theta", 10000.0)
            dim = (hparams["hidden_size"] // hparams["num_attention_heads"])  # 128
            scaled_base = base * (alpha ** (dim / (dim - 2)))  # 10000 * (1000 ** (128 / 126)) = 11158839.9251
            self.gguf_writer.add_rope_freq_base(scaled_base)
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)
            self.gguf_writer.add_rope_scaling_factor(1)
            # There is no consistent way to calculate ctx from alpha, and the config is incorrectly set to 32k
            self.gguf_writer.add_rope_scaling_orig_ctx_len(256 * 1024)  # 256k context length
            self.gguf_writer.add_context_length(256 * 1024)  # 256k context length

            # if any of our assumptions about the values are wrong, something has changed and this may need to be updated
            assert alpha == 1000 and base == 10000.0 and dim == 128 and self.hparams["max_position_embeddings"] in [32 * 1024, 256 * 1024], \
                "HunYuan dynamic RoPE scaling assumptions changed, please update the logic or context length manually"

    _experts: list[dict[str, Tensor]] | None = None
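
    # Expert tensors are buffered per layer until all n_experts * 3 projections
    # (down/gate/up) have been seen, then each projection is stacked into a single
    # 3D [n_experts, rows, cols] tensor before being written out.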
    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        if name == "lm_head.weight":
            if self.hparams.get("tie_word_embeddings", False):
                logger.info("Skipping tied output layer 'lm_head.weight'")
                return []

        if name.find("mlp.experts") != -1:
            n_experts = self.hparams["num_experts"]
            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                # merge the experts into a single 3d tensor
                tensors: list[tuple[str, Tensor]] = []
                for w_name in ["down_proj", "gate_proj", "up_proj"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)
                    merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"
                    new_name = self.map_tensor_name(merged_name)
                    tensors.append((new_name, data_torch))

                return tensors
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()
        if self._experts is not None:
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")


@ModelBase.register("LLaDAMoEModel", "LLaDAMoEModelLM")
class LLaDAMoEModel(TextModel):
    model_arch = gguf.MODEL_ARCH.LLADA_MOE

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        if (n_experts := self.hparams.get("num_experts")) is not None:
            self.gguf_writer.add_expert_count(n_experts)

        if (expert_intermediate_size := self.hparams.get("expert_intermediate_size")) is not None:
            self.gguf_writer.add_expert_feed_forward_length(expert_intermediate_size)

        # number of experts used per token (top-k)
        if (n_experts_used := self.hparams.get("num_experts_per_tok")) is not None:
            self.gguf_writer.add_expert_used_count(n_experts_used)

        self.gguf_writer.add_mask_token_id(156895)
        self.gguf_writer.add_causal_attention(False)
        self.gguf_writer.add_diffusion_shift_logits(False)

    _experts: list[dict[str, Tensor]] | None = None

    # Copied from: Qwen2MoeModel
    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # process the experts separately
        if name.find("experts") != -1:
            n_experts = self.hparams["num_experts"]
            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for w_name in ["down_proj", "gate_proj", "up_proj"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)
                    merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"
                    new_name = self.map_tensor_name(merged_name)
                    tensors.append((new_name, data_torch))
                return tensors
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]

    # Copied from: Qwen2MoeModel
    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")


@ModelBase.register("HunYuanDenseV1ForCausalLM")
class HunYuanModel(TextModel):
    model_arch = gguf.MODEL_ARCH.HUNYUAN_DENSE

    def set_vocab(self):
        if (self.dir_model / "tokenizer.json").is_file():
            self._set_vocab_gpt2()
        else:
            from transformers import AutoTokenizer
            tokenizer = AutoTokenizer.from_pretrained(self.dir_model, trust_remote_code=True)

            # 1. Get the pre-tokenizer identifier hash
            tokpre = self.get_vocab_base_pre(tokenizer)

            # 2. Reverse-engineer the merges list from mergeable_ranks
            merges = []
            vocab = {}
            mergeable_ranks = tokenizer.mergeable_ranks
            for token, rank in mergeable_ranks.items():
                vocab[QwenModel.token_bytes_to_string(token)] = rank
                if len(token) == 1:
                    continue
                merged = QwenModel.bpe(mergeable_ranks, token, max_rank=rank)
                if len(merged) == 2:
                    merges.append(' '.join(map(QwenModel.token_bytes_to_string, merged)))

            # 3. Generate the tokens and toktypes lists
            vocab_size = self.hparams["vocab_size"]
            assert tokenizer.vocab_size == vocab_size
            special_tokens = tokenizer.special_tokens
            reverse_vocab = {id_: encoded_tok for encoded_tok, id_ in {**vocab, **special_tokens}.items()}
            tokens: list[str] = []
            toktypes: list[int] = []
            for i in range(vocab_size):
                if i not in reverse_vocab:
                    tokens.append(f"[PAD{i}]")
                    toktypes.append(gguf.TokenType.UNUSED)
                else:
                    token = reverse_vocab[i]
                    tokens.append(token)
                    if i in special_tokens.values():
                        toktypes.append(gguf.TokenType.CONTROL)
                    else:
                        toktypes.append(gguf.TokenType.NORMAL)

            # 4. Write all vocab-related fields to the GGUF writer
            self.gguf_writer.add_tokenizer_model("gpt2")
            self.gguf_writer.add_tokenizer_pre(tokpre)
            self.gguf_writer.add_token_list(tokens)
            self.gguf_writer.add_token_types(toktypes)
            self.gguf_writer.add_token_merges(merges)

            # 5. Add special tokens and chat templates
            special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=False)
            special_vocab.add_to_gguf(self.gguf_writer)

        # FIX for BOS token: Overwrite incorrect id read from config.json
        if self.hparams['hidden_size'] == 4096:
            self.gguf_writer.add_bos_token_id(127958)  # only for 7b dense, fix <|bos|> token

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        hparams = self.hparams

        # Rope
        rope_scaling = hparams.get("rope_scaling", {})
        if rope_scaling.get("type") == "dynamic":
            # HunYuan uses NTK Aware Alpha based scaling. Original implementation: https://www.reddit.com/r/LocalLLaMA/comments/14lz7j5/ntkaware_scaled_rope_allows_llama_models_to_have/
            # 1000 corresponds to a usable context length of 256k (https://github.com/Tencent-Hunyuan/Hunyuan-A13B/blob/main/report/Hunyuan_A13B_Technical_Report.pdf)
            alpha = rope_scaling.get("alpha", 50)
            base = hparams.get("rope_theta", 10000.0)
            dim = hparams["head_dim"]
            scaled_base = base * (alpha ** (dim / (dim - 2)))
            self.gguf_writer.add_rope_freq_base(scaled_base)
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)
            self.gguf_writer.add_rope_scaling_factor(1)
            # There is no consistent way to calculate ctx from alpha, and the config is incorrectly set to 32k
            self.gguf_writer.add_rope_scaling_orig_ctx_len(256 * 1024)  # 256k context length
            self.gguf_writer.add_context_length(256 * 1024)  # 256k context length

            # if any of our assumptions about the values are wrong, something has changed and this may need to be updated
            assert base == 10000.0 and self.hparams["max_position_embeddings"] in [32 * 1024, 256 * 1024], \
                "HunYuan dynamic RoPE scaling assumptions changed, please update the logic or context length manually"

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        if name == "lm_head.weight":
            if self.hparams.get("tie_word_embeddings", False):
                logger.info("Skipping tied output layer 'lm_head.weight'")
                return []

        return [(self.map_tensor_name(name), data_torch)]


@ModelBase.register("SmolLM3ForCausalLM")
class SmolLM3Model(LlamaModel):
    model_arch = gguf.MODEL_ARCH.SMOLLM3

    def set_vocab(self):
        super().set_vocab()
        # remove unsupported array slicing in chat template
        # ref: https://huggingface.co/ggml-org/SmolLM3-3B-GGUF/discussions/1
        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(self.dir_model)
        if tokenizer.chat_template is not None:
            chat_template = tokenizer.chat_template.replace("[:]", "")
            self.gguf_writer.add_chat_template(chat_template)


@ModelBase.register("GptOssForCausalLM")
class GptOssModel(TextModel):
    model_arch = gguf.MODEL_ARCH.GPT_OSS
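
    # HF stores MXFP4 expert weights as uint8 nibble pairs in a "planar" order
    # (all first-half values, then all second-half values per 16-byte sub-block),
    # while ggml expects the two FP4 values of each byte interleaved element-wise
    # with the opposite low/high nibble convention. This reshuffles the 16-byte
    # sub-blocks accordingly, purely via byte/nibble arithmetic.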
    def transform_nibble_layout(self, tensor):
        assert tensor.dtype == torch.uint8
        assert tensor.shape[-1] == 16
        # swap nibbles
        t_lo = tensor & 0x0F
        t_hi = tensor & 0xF0
        t_swapped = (t_lo << 4) | (t_hi >> 4)
        tensor = t_swapped
        # transform aaaa...bbbb... to abababab...
        blk_a, blk_b = tensor.chunk(2, dim=-1)
        # spread the a-nibbles into the high nibble of each output byte ("a_")
        blk_a0 = (blk_a & 0xF0).view(-1, 1)
        blk_a1 = (blk_a << 4).view(-1, 1)
        blk_a = torch.stack((blk_a0, blk_a1), dim=2).view(tensor.shape)
        # spread the b-nibbles into the low nibble of each output byte ("_b")
        blk_b0 = (blk_b >> 4).view(-1, 1)
        blk_b1 = (blk_b & 0x0F).view(-1, 1)
        blk_b = torch.stack((blk_b0, blk_b1), dim=2).view(tensor.shape)
        # combine and swap once more
        out = blk_a | blk_b
        out_h = out & 0xF0
        out_l = out & 0x0F
        out = (out_h >> 4) | (out_l << 4)
        return out
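
    # Assemble ggml's MXFP4 block layout: one uint8 scale (E8M0 exponent) followed
    # by 16 bytes holding 32 packed FP4 values, i.e. 17 bytes per block of 32
    # elements (hence the `* 32` in the logical shape logged below).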
    def repack_mxfp4(self, new_name: str, blocks: Tensor, scales: Tensor):
        assert blocks.dtype == torch.uint8
        assert scales.dtype == torch.uint8
        scales = scales.unsqueeze(-1)
        assert len(blocks.shape) == 4
        assert len(scales.shape) == 4
        blocks = self.transform_nibble_layout(blocks)
        new_data = torch.concat((scales, blocks), dim=-1)
        new_shape = [new_data.shape[0], new_data.shape[1], new_data.shape[2] * 32]
        logger.info(f"Repacked {new_name} with shape {new_shape} and quantization MXFP4")
        # flatten last dim
        new_data = new_data.view(new_data.shape[0], new_data.shape[1], new_data.shape[2] * new_data.shape[3])
        new_data = new_data.numpy()
        self.gguf_writer.add_tensor(new_name, new_data, raw_dtype=gguf.GGMLQuantizationType.MXFP4)
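
    # Each `*_blocks` tensor is stashed until its matching `*_scales` tensor
    # arrives (the shards are assumed to store blocks before scales); the fused
    # gate_up_proj is also de-interleaved here into separate gate/up halves.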
    def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
        blocks0: Tensor = torch.zeros(1)
        blocks1: Tensor = torch.zeros(1)
        # we assume that tensors are loaded in the correct order
        for name, data_torch in self.get_tensors():
            if "mlp.experts.down_proj_blocks" in name:
                blocks0 = data_torch
            elif "mlp.experts.down_proj_scales" in name:
                new_name = self.map_tensor_name(name.replace("_scales", ".weight"))
                self.repack_mxfp4(new_name, blocks0, data_torch)
            elif "mlp.experts.gate_up_proj_blocks" in name:
                blocks0, blocks1 = data_torch[:, ::2, :, :], data_torch[:, 1::2, :, :]
            elif "mlp.experts.gate_up_proj_scales" in name:
                scales0, scales1 = data_torch[:, ::2, :], data_torch[:, 1::2, :]
                new_name_gate = self.map_tensor_name(name.replace("gate_up_proj_scales", "gate_proj.weight"))
                new_name_up = self.map_tensor_name(name.replace("gate_up_proj_scales", "up_proj.weight"))
                self.repack_mxfp4(new_name_gate, blocks0, scales0)
                self.repack_mxfp4(new_name_up, blocks1, scales1)
        return []
    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        if "sinks" in name:
            name += ".weight"

        # correct naming for down_proj
        if "down_proj" in name:
            if name.endswith("_bias"):
                name = name.replace("down_proj_bias", "down_proj.bias")
            elif "_blocks" not in name and "_scales" not in name:
                logger.warning(f"{name} is not in MXFP4, performance may be degraded")
                name = name.replace("down_proj", "down_proj.weight")
                data_torch = data_torch.transpose(-1, -2)
            else:
                # otherwise, it should already be repacked to ggml MXFP4 format
                return []

        # split the gate_up into gate and up
        if "gate_up_proj" in name:
            if name.endswith("_bias"):
                name_up = name.replace("gate_up_proj_bias", "up_proj.bias")
                name_gate = name.replace("gate_up_proj_bias", "gate_proj.bias")
                gate_proj_bias, up_proj_bias = data_torch[..., ::2], data_torch[..., 1::2]
                return [
                    (self.map_tensor_name(name_gate), gate_proj_bias),
                    (self.map_tensor_name(name_up), up_proj_bias)
                ]
            elif "_blocks" not in name and "_scales" not in name:
                logger.warning(f"{name} is not in MXFP4, performance may be degraded")
                name_up = name.replace("gate_up_proj", "up_proj.weight")
                name_gate = name.replace("gate_up_proj", "gate_proj.weight")
                data_torch = data_torch.transpose(-1, -2)
                gate_proj_weight, up_proj_weight = data_torch[:, ::2, :], data_torch[:, 1::2, :]
                return [
                    (self.map_tensor_name(name_gate), gate_proj_weight),
                    (self.map_tensor_name(name_up), up_proj_weight)
                ]
            else:
                # otherwise, it should already be repacked to ggml MXFP4 format
                return []

        return [(self.map_tensor_name(name), data_torch)]

    def set_vocab(self):
        self._set_vocab_gpt2()

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_sliding_window(self.hparams["sliding_window"])
        self.gguf_writer.add_expert_feed_forward_length(self.hparams["intermediate_size"])

        rope_scaling = self.hparams.get("rope_scaling") or {}
        rope_type = rope_scaling.get("rope_type", rope_scaling.get("type"))
        assert rope_type == "yarn", f"GPT-OSS only supports yarn rope scaling, got {rope_type}"
        self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
        self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
        self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling.get("original_max_position_embeddings", 4096))


@ModelBase.register("Lfm2ForCausalLM", "LFM2ForCausalLM")
class LFM2Model(TextModel):
    model_arch = gguf.MODEL_ARCH.LFM2
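
    # Mirrors the Llama-style FFN sizing convention: optionally shrink the raw
    # block_ff_dim to 2/3, apply the custom multiplier, then round up to a
    # multiple of block_multiple_of.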
    def _add_feed_forward_length(self):
        ff_dim = self.hparams["block_ff_dim"]
        auto_adjust_ff_dim = self.hparams["block_auto_adjust_ff_dim"]
        ffn_dim_multiplier = self.hparams["block_ffn_dim_multiplier"]
        multiple_of = self.hparams["block_multiple_of"]

        if auto_adjust_ff_dim:
            ff_dim = int(2 * ff_dim / 3)
            # custom dim factor multiplier
            if ffn_dim_multiplier is not None:
                ff_dim = int(ffn_dim_multiplier * ff_dim)
            ff_dim = multiple_of * ((ff_dim + multiple_of - 1) // multiple_of)

        self.gguf_writer.add_feed_forward_length(ff_dim)
    def set_gguf_parameters(self):
        # set num_key_value_heads only for attention layers
        self.hparams["num_key_value_heads"] = [
            self.hparams["num_key_value_heads"] if layer_type == "full_attention" else 0
            for layer_type in self.hparams["layer_types"]
        ]

        super().set_gguf_parameters()
        self.gguf_writer.add_vocab_size(self.hparams["vocab_size"])
        self.gguf_writer.add_shortconv_l_cache(self.hparams["conv_L_cache"])
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["norm_eps"])
        self._add_feed_forward_length()

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        is_vision_tensor = "vision_tower" in name or "multi_modal_projector" in name
        if is_vision_tensor:
            # skip vision tensors
            return []

        name = name.replace("language_model.", "")

        # conv op requires 2d tensor
        if 'conv.conv' in name:
            data_torch = data_torch.squeeze(1)

        return [(self.map_tensor_name(name), data_torch)]


@ModelBase.register("Lfm2VlForConditionalGeneration")
class LFM2VLModel(MmprojModel):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        assert self.hparams_vision is not None
        # TODO(tarek): for dynamic resolution image_size is not specified, setting here for compatibility
        self.hparams_vision["image_size"] = 256

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.LFM2)
        self.gguf_writer.add_vision_attention_layernorm_eps(self.find_vparam(["layer_norm_eps"]))
        self.gguf_writer.add_vision_projector_scale_factor(self.global_config.get("downsample_factor", 2))
        self.gguf_writer.add_vision_use_gelu(True)
        # python notation, e.g. for vision_feature_layer == -1, we pick the last layer -> vision_feature_layers_to_drop = 0
        vision_feature_layers_to_drop = -(self.global_config.get("vision_feature_layer", -1) + 1)
        self.gguf_writer.add_vision_block_count(self.find_vparam(self.n_block_keys) - vision_feature_layers_to_drop)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        is_vision_tensor = "vision_tower" in name or "multi_modal_projector" in name
        if is_vision_tensor:
            # remove "model." prefix
            name = name.replace("model.vision_tower.", "vision_tower.")
            name = name.replace("model.multi_modal_projector.", "multi_modal_projector.")

            if "patch_embedding.weight" in name:
                data_torch = data_torch.view(data_torch.shape[0], 16, 16, 3).permute(0, 3, 1, 2)

            return [(self.map_tensor_name(name), data_torch)]

        return []  # skip other tensors


@ModelBase.register("SmallThinkerForCausalLM")
class SmallThinkerModel(TextModel):
    model_arch = gguf.MODEL_ARCH.SMALLTHINKER

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        if (n_experts := self.hparams.get("num_experts", self.hparams.get("moe_num_primary_experts"))) is not None:
            self.gguf_writer.add_expert_count(n_experts)
        if (n_experts_used := self.hparams.get("num_experts_per_tok", self.hparams.get("moe_num_active_primary_experts"))) is not None:
            self.gguf_writer.add_expert_used_count(n_experts_used)
        if (moe_intermediate_size := self.hparams.get("moe_ffn_hidden_size")) is not None:
            self.gguf_writer.add_expert_feed_forward_length(moe_intermediate_size)
            self.gguf_writer.add_feed_forward_length(moe_intermediate_size)
            logger.info(f"gguf: expert feed forward length = {moe_intermediate_size}")
        if self.hparams.get("moe_primary_router_apply_softmax"):
            self.gguf_writer.add_expert_gating_func(gguf.ExpertGatingFuncType.SOFTMAX)
        else:
            self.gguf_writer.add_expert_gating_func(gguf.ExpertGatingFuncType.SIGMOID)
        # YaRN is not enabled by default
        # To enable it, please refer to this guide: https://huggingface.co/Qwen/Qwen3-30B-A3B#processing-long-texts
        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
            self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"])

        sliding_window_layout = self.hparams.get("sliding_window_layout")
        if sliding_window_layout:
            for i in sliding_window_layout:
                if i != 0:
                    sliding_window = self.hparams.get("sliding_window_size")
                    if sliding_window:
                        self.gguf_writer.add_sliding_window(sliding_window)
                    break

    _experts: list[dict[str, Tensor]] | None = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # process the experts separately
        if name.find("experts") != -1:
            n_experts = self.hparams.get("num_experts", self.hparams.get("moe_num_primary_experts"))
            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for w_name in ["down", "gate", "up"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.{w_name}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)
                    merged_name = f"model.layers.{bid}.block_sparse_moe.experts.{w_name}.weight"
                    new_name = self.map_tensor_name(merged_name)
                    tensors.append((new_name, data_torch))
                return tensors
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")


class MistralModel(LlamaModel):
    model_arch = gguf.MODEL_ARCH.LLAMA
    model_name = "Mistral"
    hf_arch = ""
    is_mistral_format = True
    undo_permute = False

    @staticmethod
    def get_community_chat_template(vocab: MistralVocab, templates_dir: Path, is_mistral_format: bool):
        assert TokenizerVersion is not None, "mistral_common is not installed"
        assert isinstance(vocab.tokenizer, (Tekkenizer, SentencePieceTokenizer)), (
            f"Expected Tekkenizer or SentencePieceTokenizer, got {type(vocab.tokenizer)}"
        )

        if vocab.tokenizer.version == TokenizerVersion.v1:
            return "mistral-v1"
        elif vocab.tokenizer.version == TokenizerVersion.v3 and vocab.tokenizer_type == MistralTokenizerType.spm:
            return "mistral-v3"
        elif vocab.tokenizer.version == TokenizerVersion.v3 and vocab.tokenizer_type == MistralTokenizerType.tekken:
            return "mistral-v3-tekken"
        elif vocab.tokenizer.version == TokenizerVersion.v7 and vocab.tokenizer_type == MistralTokenizerType.spm:
            return "mistral-v7"
        elif vocab.tokenizer.version == TokenizerVersion.v7 and vocab.tokenizer_type == MistralTokenizerType.tekken:
            return "mistral-v7-tekken"
        elif vocab.tokenizer.version == TokenizerVersion.v11:
            template_file = "Mistral-Small-3.2-24B-Instruct-2506.jinja"
        elif vocab.tokenizer.version == TokenizerVersion.v13:
            template_file = "unsloth-mistral-Devstral-Small-2507.jinja"
        else:
            err_message = f"Unknown tokenizer type: {vocab.tokenizer_type} and version {vocab.tokenizer.version}"
            if is_mistral_format:
                err_message += (
                    ". Please pass the --disable-mistral-community-chat-template argument to the CLI "
                    "if you want to skip this error and use the Mistral official `mistral-common` pre-processing library."
                )
            raise ValueError(err_message)

        template_path = templates_dir / template_file
        if not template_path.exists():
            raise FileNotFoundError(f"Template file not found: {template_path}")

        with open(template_path, "r", encoding="utf-8") as f:
            template = f.read()

        return template


class PixtralModel(LlavaVisionModel):
    model_name = "Pixtral"
    hf_arch = ""
    is_mistral_format = True

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.PIXTRAL)
        self.gguf_writer.add_vision_attention_layernorm_eps(
            self.find_hparam(["norm_eps"])
        )
        self.gguf_writer.add_rope_freq_base(self.find_vparam(["rope_theta"]))
        self.gguf_writer.add_vision_use_silu(True)

        # spatial_merge_size
        if self.find_vparam(["mm_projector_id"]) == "patch_merge":
            self.gguf_writer.add_vision_spatial_merge_size(
                self.find_vparam(["spatial_merge_size"])
            )

    def map_tensor_name(self, name: str, try_suffixes: Sequence[str] = (".weight", ".bias")) -> str:
        if name == "vision_language_adapter.w_in.weight":
            return "mm.1.weight"
        elif name == "vision_language_adapter.w_out.weight":
            return "mm.2.weight"
        return super().map_tensor_name(name, try_suffixes)


@ModelBase.register("KimiVLForConditionalGeneration")
class KimiVLModel(MmprojModel):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        assert self.hparams_vision is not None
        self.hparams_vision["image_size"] = 64 * 14  # for compatibility

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.KIMIVL)
        self.gguf_writer.add_vision_use_gelu(True)
        self.gguf_writer.add_vision_projector_scale_factor(2)
        # eps is the same as pytorch's default value
        assert self.hparams_vision is not None
        self.gguf_writer.add_vision_attention_layernorm_eps(self.hparams_vision.get("layer_norm_eps", 1e-5))
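
    # The vision tower's fused attention tensor is split back into separate
    # q/k/v tensors below; weights are chunked along the output dim (0) and
    # biases along the last dim.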
    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        is_vision_tensor = "vision_tower" in name or "multi_modal_projector" in name
        if is_vision_tensor:
            if "pos_emb.weight" in name:
                data_torch = data_torch.view(data_torch.shape[0] * data_torch.shape[1], data_torch.shape[2])
            elif "wqkv" in name:
                split_dim = 0 if "weight" in name else -1
                wq, wk, wv = data_torch.chunk(3, dim=split_dim)
                return [
                    (self.map_tensor_name(name.replace("wqkv", "wq")), wq),
                    (self.map_tensor_name(name.replace("wqkv", "wk")), wk),
                    (self.map_tensor_name(name.replace("wqkv", "wv")), wv)
                ]

            return [(self.map_tensor_name(name), data_torch)]

        return []  # skip other tensors


###### CONVERSION LOGIC ######


# tree of lazy tensors
class LazyTorchTensor(gguf.LazyBase):
    _tensor_type = torch.Tensor

    # to keep the type-checker happy
    dtype: torch.dtype
    shape: torch.Size

    # only used when converting a torch.Tensor to a np.ndarray
    _dtype_map: dict[torch.dtype, type] = {
        torch.float16: np.float16,
        torch.float32: np.float32,
        torch.uint8: np.uint8,
    }

    # used for safetensors slices
    # ref: https://github.com/huggingface/safetensors/blob/079781fd0dc455ba0fe851e2b4507c33d0c0d407/bindings/python/src/lib.rs#L1046
    # TODO: uncomment U64, U32, and U16, ref: https://github.com/pytorch/pytorch/issues/58734
    _dtype_str_map: dict[str, torch.dtype] = {
        "F64": torch.float64,
        "F32": torch.float32,
        "BF16": torch.bfloat16,
        "F16": torch.float16,
        # "U64": torch.uint64,
        "I64": torch.int64,
        # "U32": torch.uint32,
        "I32": torch.int32,
        # "U16": torch.uint16,
        "I16": torch.int16,
        "U8": torch.uint8,
        "I8": torch.int8,
        "BOOL": torch.bool,
        "F8_E4M3": torch.float8_e4m3fn,
        "F8_E5M2": torch.float8_e5m2,
    }
    def numpy(self) -> gguf.LazyNumpyTensor:
        dtype = self._dtype_map[self.dtype]
        return gguf.LazyNumpyTensor(
            meta=gguf.LazyNumpyTensor.meta_with_dtype_and_shape(dtype, self.shape),
            args=(self,),
            func=(lambda s: s.numpy())
        )

    @classmethod
    def meta_with_dtype_and_shape(cls, dtype: torch.dtype, shape: tuple[int, ...]) -> Tensor:
        return torch.empty(size=shape, dtype=dtype, device="meta")

    @classmethod
    def from_safetensors_slice(cls, st_slice: Any) -> Tensor:
        dtype = cls._dtype_str_map[st_slice.get_dtype()]
        shape: tuple[int, ...] = tuple(st_slice.get_shape())
        lazy = cls(meta=cls.meta_with_dtype_and_shape(dtype, shape), args=(st_slice,), func=lambda s: s[:])
        return cast(torch.Tensor, lazy)

    @classmethod
    def from_remote_tensor(cls, remote_tensor: gguf.utility.RemoteTensor):
        dtype = cls._dtype_str_map[remote_tensor.dtype]
        shape = remote_tensor.shape
        meta = cls.meta_with_dtype_and_shape(dtype, shape)
        lazy = cls(meta=meta, args=(remote_tensor,), func=lambda r: torch.frombuffer(r.data(), dtype=dtype).reshape(shape))
        return cast(torch.Tensor, lazy)
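
    # Intercepts every torch operation applied to a lazy tensor: instead of
    # running eagerly, the call is wrapped so it is recorded on the lazy graph
    # and only evaluated when the tensor data is actually needed (e.g. at write
    # time).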
    @classmethod
    def __torch_function__(cls, func, types, args=(), kwargs=None):
        del types  # unused

        if kwargs is None:
            kwargs = {}

        if func is torch.Tensor.numpy:
            return args[0].numpy()

        return cls._wrap_fn(func)(*args, **kwargs)


def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(
        description="Convert a huggingface model to a GGML compatible file")
    parser.add_argument(
        "--vocab-only", action="store_true",
        help="extract only the vocab",
    )
    parser.add_argument(
        "--outfile", type=Path,
        help="path to write to; default: based on input. {ftype} will be replaced by the outtype.",
    )
    parser.add_argument(
        "--outtype", type=str, choices=["f32", "f16", "bf16", "q8_0", "tq1_0", "tq2_0", "auto"], default="f16",
        help="output format - use f32 for float32, f16 for float16, bf16 for bfloat16, q8_0 for Q8_0, tq1_0 or tq2_0 for ternary, and auto for the highest-fidelity 16-bit float type depending on the first loaded tensor type",
    )
    parser.add_argument(
        "--bigendian", action="store_true",
        help="model is executed on big endian machine",
    )
    parser.add_argument(
        "model", type=str,
        help="directory containing model file or huggingface repository ID (if --remote)",
        nargs="?",
    )
    parser.add_argument(
        "--use-temp-file", action="store_true",
        help="use the tempfile library while processing (helpful when running out of memory, process killed)",
    )
    parser.add_argument(
        "--no-lazy", action="store_true",
        help="use more RAM by computing all outputs before writing (use in case lazy evaluation is broken)",
    )
    parser.add_argument(
        "--model-name", type=str, default=None,
        help="name of the model",
    )
    parser.add_argument(
        "--verbose", action="store_true",
        help="increase output verbosity",
    )
    parser.add_argument(
        "--split-max-tensors", type=int, default=0,
        help="max tensors in each split",
    )
    parser.add_argument(
        "--split-max-size", type=str, default="0",
        help="max size per split N(M|G)",
    )
    parser.add_argument(
        "--dry-run", action="store_true",
        help="only print out a split plan and exit, without writing any new files",
    )
    parser.add_argument(
        "--no-tensor-first-split", action="store_true",
        help="do not add tensors to the first split (disabled by default)"
    )
    parser.add_argument(
        "--metadata", type=Path,
        help="Specify the path for an authorship metadata override file"
    )
    parser.add_argument(
        "--print-supported-models", action="store_true",
        help="Print the supported models"
    )
    parser.add_argument(
        "--remote", action="store_true",
        help="(Experimental) Read safetensors file remotely without downloading to disk. Config and tokenizer files will still be downloaded. To use this feature, you need to specify Hugging Face model repo name instead of a local directory. For example: 'HuggingFaceTB/SmolLM2-1.7B-Instruct'. Note: To access gated repo, set HF_TOKEN environment variable to your Hugging Face token.",
    )
    parser.add_argument(
        "--mmproj", action="store_true",
        help="(Experimental) Export multimodal projector (mmproj) for vision models. This will only work on some vision models. A prefix 'mmproj-' will be added to the output file name.",
    )
    parser.add_argument(
        "--mistral-format", action="store_true",
        help="Whether the model is stored following the Mistral format.",
    )
    parser.add_argument(
        "--disable-mistral-community-chat-template", action="store_true",
        help=(
            "Whether to disable usage of Mistral community chat templates. If set, the Mistral official `mistral-common` library is used for tokenization and detokenization of Mistral models. "
            "Using `mistral-common` ensures correctness and zero-day support of tokenization for models converted from the Mistral format, but requires manually setting up the tokenization server."
        )
    )

    args = parser.parse_args()
    if not args.print_supported_models and args.model is None:
        parser.error("the following arguments are required: model")
    return args
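

# note: the K/M/G suffixes below are decimal (SI) units, e.g. "250K" -> 250_000
# bytes and "5G" -> 5_000_000_000 bytes, not the 1024-based binary variants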
def split_str_to_n_bytes(split_str: str) -> int:
    if split_str.endswith("K"):
        n = int(split_str[:-1]) * 1000
    elif split_str.endswith("M"):
        n = int(split_str[:-1]) * 1000 * 1000
    elif split_str.endswith("G"):
        n = int(split_str[:-1]) * 1000 * 1000 * 1000
    elif split_str.isnumeric():
        n = int(split_str)
    else:
        raise ValueError(f"Invalid split size: {split_str}, must be a number, optionally followed by K, M, or G")

    if n < 0:
        raise ValueError(f"Invalid split size: {split_str}, must be non-negative")

    return n


def get_model_architecture(hparams: dict[str, Any], model_type: ModelType) -> str:
    # TODO @ngxson : this won't work correctly if the model has both audio & vision encoders
    # maybe we should fallback to text model's arch in that case, since not many models have both
    text_config = hparams.get("text_config", {})
    vision_config = hparams.get("vision_config", {})
    arch = None
    if (arches := hparams.get("architectures")) is not None and len(arches) > 0:
        arch = arches[0]
    elif "ssm_cfg" in hparams:
        # For non-hf Mamba and Mamba2 models
        arch = hparams["ssm_cfg"].get("layer", "Mamba") + "ForCausalLM"

    # if "architectures" is found in the sub-config, use that instead
    if model_type == ModelType.TEXT and text_config.get("architectures") is not None:
        arch = text_config["architectures"][0]
    elif model_type == ModelType.MMPROJ and vision_config.get("architectures") is not None:
        arch = vision_config["architectures"][0]
    if arch is None:
        raise ValueError("Failed to detect model architecture")
    return arch


def main() -> None:
    args = parse_args()

    if args.print_supported_models:
        logger.error("Supported models:")
        ModelBase.print_registered_models()
        sys.exit(0)

    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)

    if args.remote:
        hf_repo_id = args.model
        from huggingface_hub import snapshot_download
        local_dir = snapshot_download(
            repo_id=hf_repo_id,
            allow_patterns=["LICENSE", "*.json", "*.md", "*.txt", "tokenizer.model"])
        dir_model = Path(local_dir)
        logger.info(f"Downloaded config and tokenizer to {local_dir}")
    else:
        hf_repo_id = None
        dir_model = Path(args.model)

    if not dir_model.is_dir():
        logger.error(f'Error: {dir_model} is not a directory')
        sys.exit(1)

    ftype_map: dict[str, gguf.LlamaFileType] = {
        "f32": gguf.LlamaFileType.ALL_F32,
        "f16": gguf.LlamaFileType.MOSTLY_F16,
        "bf16": gguf.LlamaFileType.MOSTLY_BF16,
        "q8_0": gguf.LlamaFileType.MOSTLY_Q8_0,
        "tq1_0": gguf.LlamaFileType.MOSTLY_TQ1_0,
        "tq2_0": gguf.LlamaFileType.MOSTLY_TQ2_0,
        "auto": gguf.LlamaFileType.GUESSED,
    }

    is_split = args.split_max_tensors > 0 or args.split_max_size != "0"
    if args.use_temp_file and is_split:
        logger.error("Error: Cannot use temp file when splitting")
        sys.exit(1)

    if args.outfile is not None:
        fname_out = args.outfile
    elif hf_repo_id:
        # if remote, use the model ID as the output file name
        fname_out = Path("./" + hf_repo_id.replace("/", "-") + "-{ftype}.gguf")
    else:
        fname_out = dir_model

    logger.info(f"Loading model: {dir_model.name}")

    if args.mmproj:
        if "mmproj" not in fname_out.name:
            fname_out = ModelBase.add_prefix_to_filename(fname_out, "mmproj-")

    is_mistral_format = args.mistral_format
    disable_mistral_community_chat_template = args.disable_mistral_community_chat_template

    with torch.inference_mode():
        output_type = ftype_map[args.outtype]
        model_type = ModelType.MMPROJ if args.mmproj else ModelType.TEXT
        hparams = ModelBase.load_hparams(dir_model, is_mistral_format)
        if not is_mistral_format:
            model_architecture = get_model_architecture(hparams, model_type)
            logger.info(f"Model architecture: {model_architecture}")
            try:
                model_class = ModelBase.from_model_architecture(model_architecture, model_type=model_type)
            except NotImplementedError:
                logger.error(f"Model {model_architecture} is not supported")
                sys.exit(1)
        elif args.mmproj:
            assert hparams.get("vision_encoder") is not None, "This model does not support multimodal"
            model_class = PixtralModel
        else:
            model_class = MistralModel

        model_instance = model_class(dir_model, output_type, fname_out,
                                     is_big_endian=args.bigendian, use_temp_file=args.use_temp_file,
                                     eager=args.no_lazy,
                                     metadata_override=args.metadata, model_name=args.model_name,
                                     split_max_tensors=args.split_max_tensors,
                                     split_max_size=split_str_to_n_bytes(args.split_max_size), dry_run=args.dry_run,
                                     small_first_shard=args.no_tensor_first_split,
                                     remote_hf_model_id=hf_repo_id, disable_mistral_community_chat_template=disable_mistral_community_chat_template
                                     )

        if args.vocab_only:
            logger.info("Exporting model vocab...")
            model_instance.write_vocab()
            logger.info(f"Model vocab successfully exported to {model_instance.fname_out}")
        else:
            logger.info("Exporting model...")
            model_instance.write()
            out_path = f"{model_instance.fname_out.parent}{os.sep}" if is_split else model_instance.fname_out
            logger.info(f"Model successfully exported to {out_path}")


if __name__ == '__main__':
    main()