#!/usr/bin/env python3
# -*- coding: utf-8 -*-

from __future__ import annotations

import ast
import logging
import argparse
import contextlib
import json
import os
import re
import sys
from enum import IntEnum
from pathlib import Path
from hashlib import sha256
from typing import TYPE_CHECKING, Any, Callable, ContextManager, Iterable, Iterator, Literal, Sequence, TypeVar, cast
from itertools import chain
from transformers import AutoConfig

import math
import numpy as np
import torch

if TYPE_CHECKING:
    from torch import Tensor

if 'NO_LOCAL_GGUF' not in os.environ:
    sys.path.insert(1, str(Path(__file__).parent / 'gguf-py'))

import gguf

logger = logging.getLogger("hf-to-gguf")


###### MODEL DEFINITIONS ######
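
# Token types for sentencepiece-style vocabularies; the numbering mirrors
# sentencepiece's ModelProto.SentencePiece.Type enum, so the values can be
# written into the GGUF vocab metadata unchanged.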
class SentencePieceTokenTypes(IntEnum):
    NORMAL = 1
    UNKNOWN = 2
    CONTROL = 3
    USER_DEFINED = 4
    UNUSED = 5
    BYTE = 6


class ModelType(IntEnum):
    TEXT = 1
    MMPROJ = 2


AnyModel = TypeVar("AnyModel", bound="type[ModelBase]")
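

# Base class shared by all converters: a subclass declares `model_arch`,
# registers the HF `architectures` strings it handles via ModelBase.register(),
# and overrides hooks such as set_gguf_parameters() and modify_tensors() as
# needed; ModelBase owns the GGUF writer, the loaded hparams, and the tensor
# iteration.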
class ModelBase:
    _model_classes: dict[ModelType, dict[str, type[ModelBase]]] = {
        ModelType.TEXT: {},
        ModelType.MMPROJ: {},
    }

    dir_model: Path
    ftype: gguf.LlamaFileType
    fname_out: Path
    is_big_endian: bool
    endianess: gguf.GGUFEndian
    use_temp_file: bool
    lazy: bool
    part_names: list[str]
    is_safetensors: bool
    hparams: dict[str, Any]
    tensor_names: set[str] | None
    gguf_writer: gguf.GGUFWriter
    model_name: str | None
    metadata_override: Path | None
    dir_model_card: Path
    remote_hf_model_id: str | None

    # subclasses should define this!
    model_arch: gguf.MODEL_ARCH

    # subclasses should initialize this!
    block_count: int
    tensor_map: gguf.TensorNameMap

    def __init__(self, dir_model: Path, ftype: gguf.LlamaFileType, fname_out: Path, *, is_big_endian: bool = False,
                 use_temp_file: bool = False, eager: bool = False,
                 metadata_override: Path | None = None, model_name: str | None = None,
                 split_max_tensors: int = 0, split_max_size: int = 0, dry_run: bool = False,
                 small_first_shard: bool = False, hparams: dict[str, Any] | None = None, remote_hf_model_id: str | None = None):
        if type(self) is ModelBase or \
                type(self) is TextModel or \
                type(self) is MmprojModel:
            raise TypeError(f"{type(self).__name__!r} should not be directly instantiated")

        self.dir_model = dir_model
        self.ftype = ftype
        self.fname_out = fname_out
        self.is_big_endian = is_big_endian
        self.endianess = gguf.GGUFEndian.BIG if is_big_endian else gguf.GGUFEndian.LITTLE
        self.use_temp_file = use_temp_file
        self.lazy = not eager or (remote_hf_model_id is not None)
        self.remote_hf_model_id = remote_hf_model_id
        if remote_hf_model_id is not None:
            self.is_safetensors = True

            def get_remote_tensors() -> Iterator[tuple[str, Tensor]]:
                logger.info(f"Using remote model with HuggingFace id: {remote_hf_model_id}")
                remote_tensors = gguf.utility.SafetensorRemote.get_list_tensors_hf_model(remote_hf_model_id)
                self.tensor_names = set(name for name in remote_tensors.keys())
                for name, remote_tensor in gguf.utility.SafetensorRemote.get_list_tensors_hf_model(remote_hf_model_id).items():
                    yield (name, LazyTorchTensor.from_remote_tensor(remote_tensor))

            self.get_tensors = get_remote_tensors
        else:
            self.part_names = ModelBase.get_model_part_names(self.dir_model, "model", ".safetensors")
            self.is_safetensors = len(self.part_names) > 0
            if not self.is_safetensors:
                self.part_names = ModelBase.get_model_part_names(self.dir_model, "pytorch_model", ".bin")

        self.hparams = ModelBase.load_hparams(self.dir_model) if hparams is None else hparams
        self.tensor_names = None
        self.metadata_override = metadata_override
        self.model_name = model_name
        self.dir_model_card = dir_model  # overridden in convert_lora_to_gguf.py

        # Apply heuristics to figure out typical tensor encoding based on first layer tensor encoding type
        if self.ftype == gguf.LlamaFileType.GUESSED:
            # NOTE: can't use field "torch_dtype" in config.json, because some finetunes lie.
            _, first_tensor = next(self.get_tensors())
            if first_tensor.dtype == torch.float16:
                logger.info(f"choosing --outtype f16 from first tensor type ({first_tensor.dtype})")
                self.ftype = gguf.LlamaFileType.MOSTLY_F16
            else:
                logger.info(f"choosing --outtype bf16 from first tensor type ({first_tensor.dtype})")
                self.ftype = gguf.LlamaFileType.MOSTLY_BF16

        # Configure GGUF Writer
        self.gguf_writer = gguf.GGUFWriter(path=None, arch=gguf.MODEL_ARCH_NAMES[self.model_arch], endianess=self.endianess, use_temp_file=self.use_temp_file,
                                           split_max_tensors=split_max_tensors, split_max_size=split_max_size, dry_run=dry_run, small_first_shard=small_first_shard)

    @classmethod
    def add_prefix_to_filename(cls, path: Path, prefix: str) -> Path:
        stem, suffix = path.stem, path.suffix
        new_name = f"{prefix}{stem}{suffix}"
        return path.with_name(new_name)

    def find_hparam(self, keys: Iterable[str], optional: bool = False) -> Any:
        key = next((k for k in keys if k in self.hparams), None)
        if key is not None:
            return self.hparams[key]
        if optional:
            return None
        raise KeyError(f"could not find any of: {keys}")
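
    # get_tensors() yields (name, tensor) pairs from every model part, reading
    # the *.index.json weight map when present so missing or extra files can be
    # reported, and wrapping weights in LazyTorchTensor when lazy conversion is
    # enabled.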
    def get_tensors(self) -> Iterator[tuple[str, Tensor]]:
        tensor_names_from_parts: set[str] = set()

        index_name = "model.safetensors" if self.is_safetensors else "pytorch_model.bin"
        index_name += ".index.json"
        index_file = self.dir_model / index_name

        if index_file.is_file():
            self.tensor_names = set()
            logger.info(f"gguf: loading model weight map from '{index_name}'")
            with open(index_file, "r", encoding="utf-8") as f:
                index: dict[str, Any] = json.load(f)
                weight_map = index.get("weight_map")
                if weight_map is None or not isinstance(weight_map, dict):
                    raise ValueError(f"Can't load 'weight_map' from {index_name!r}")
                self.tensor_names.update(weight_map.keys())
        else:
            self.tensor_names = tensor_names_from_parts
            weight_map = {}

        for part_name in self.part_names:
            logger.info(f"gguf: loading model part '{part_name}'")
            ctx: ContextManager[Any]
            if self.is_safetensors:
                from safetensors import safe_open
                ctx = cast(ContextManager[Any], safe_open(self.dir_model / part_name, framework="pt", device="cpu"))
            else:
                ctx = contextlib.nullcontext(torch.load(str(self.dir_model / part_name), map_location="cpu", mmap=True, weights_only=True))

            with ctx as model_part:
                tensor_names_from_parts.update(model_part.keys())

                for name in model_part.keys():
                    if self.is_safetensors:
                        if self.lazy:
                            data = model_part.get_slice(name)
                            data = LazyTorchTensor.from_safetensors_slice(data)
                        else:
                            data = model_part.get_tensor(name)
                    else:
                        data = model_part[name]
                        if self.lazy:
                            data = LazyTorchTensor.from_eager(data)
                    yield name, data

        # verify tensor name presence and identify potentially missing files
        if len(tensor_names_from_parts.symmetric_difference(self.tensor_names)) > 0:
            missing = sorted(self.tensor_names.difference(tensor_names_from_parts))
            extra = sorted(tensor_names_from_parts.difference(self.tensor_names))
            missing_files = sorted(set(weight_map[n] for n in missing if n in weight_map))
            if len(extra) == 0 and len(missing_files) > 0:
                raise ValueError(f"Missing or incomplete model files: {missing_files}\n"
                                 f"Missing tensors: {missing}")
            else:
                raise ValueError("Mismatch between weight map and model parts for tensor names:\n"
                                 f"Missing tensors: {missing}\n"
                                 f"Extra tensors: {extra}")
    def format_tensor_name(self, key: gguf.MODEL_TENSOR, bid: int | None = None, suffix: str = ".weight") -> str:
        if key not in gguf.MODEL_TENSORS[self.model_arch]:
            raise ValueError(f"Missing {key!r} for MODEL_TENSORS of {self.model_arch!r}")
        name: str = gguf.TENSOR_NAMES[key]
        if "{bid}" in name:
            assert bid is not None
            name = name.format(bid=bid)
        return name + suffix

    def match_model_tensor_name(self, name: str, key: gguf.MODEL_TENSOR, bid: int | None, suffix: str = ".weight") -> bool:
        if key not in gguf.MODEL_TENSORS[self.model_arch]:
            return False
        key_name: str = gguf.TENSOR_NAMES[key]
        if "{bid}" in key_name:
            if bid is None:
                return False
            key_name = key_name.format(bid=bid)
        else:
            if bid is not None:
                return False
        return name == (key_name + suffix)

    def map_tensor_name(self, name: str, try_suffixes: Sequence[str] = (".weight", ".bias")) -> str:
        new_name = self.tensor_map.get_name(key=name, try_suffixes=try_suffixes)
        if new_name is None:
            raise ValueError(f"Can not map tensor {name!r}")
        return new_name

    def set_gguf_parameters(self):
        raise NotImplementedError("set_gguf_parameters() must be implemented in subclasses")

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused
        return [(self.map_tensor_name(name), data_torch)]

    def tensor_force_quant(self, name: str, new_name: str, bid: int | None, n_dims: int) -> gguf.GGMLQuantizationType | bool:
        del name, new_name, bid, n_dims  # unused
        return False

    # some models need extra generated tensors (like rope_freqs)
    def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
        return ()
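
    # prepare_tensors() is the core conversion loop: it walks every source
    # tensor, lets the subclass rewrite it via modify_tensors(), selects a
    # target dtype (F32 for 1D/norm tensors, otherwise per --outtype or the
    # tensor_force_quant() override), quantizes the data, and hands the result
    # to the GGUF writer.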
    def prepare_tensors(self):
        max_name_len = max(len(s) for _, s in self.tensor_map.mapping.values()) + len(".weight,")

        for name, data_torch in chain(self.generate_extra_tensors(), self.get_tensors()):
            # we don't need these
            if name.endswith((".attention.masked_bias", ".attention.bias", ".rotary_emb.inv_freq")):
                continue

            old_dtype = data_torch.dtype

            # convert any unsupported data types to float32
            if data_torch.dtype not in (torch.float16, torch.float32):
                data_torch = data_torch.to(torch.float32)

            # use the first number-like part of the tensor name as the block id
            bid = None
            for part in name.split("."):
                if part.isdecimal():
                    bid = int(part)
                    break

            for new_name, data_torch in (self.modify_tensors(data_torch, name, bid)):
                # TODO: why do we squeeze here?
                # data = data_torch.squeeze().numpy()
                data = data_torch.numpy()

                # if data ends up empty, it means data_torch was a scalar tensor -> restore
                if len(data.shape) == 0:
                    data = data_torch.numpy()

                n_dims = len(data.shape)
                data_qtype: gguf.GGMLQuantizationType | bool = self.tensor_force_quant(name, new_name, bid, n_dims)

                # Most of the codebase that takes in 1D tensors or norms only handles F32 tensors
                if n_dims <= 1 or new_name.endswith("_norm.weight"):
                    data_qtype = gguf.GGMLQuantizationType.F32

                # Conditions should closely match those in llama_model_quantize_internal in llama.cpp
                # Some tensor types are always in float32
                if data_qtype is False and (
                    any(
                        self.match_model_tensor_name(new_name, key, bid)
                        for key in (
                            gguf.MODEL_TENSOR.FFN_GATE_INP,
                            gguf.MODEL_TENSOR.POS_EMBD,
                            gguf.MODEL_TENSOR.TOKEN_TYPES,
                            gguf.MODEL_TENSOR.SSM_CONV1D,
                            gguf.MODEL_TENSOR.TIME_MIX_FIRST,
                            gguf.MODEL_TENSOR.TIME_MIX_W1,
                            gguf.MODEL_TENSOR.TIME_MIX_W2,
                            gguf.MODEL_TENSOR.TIME_MIX_DECAY_W1,
                            gguf.MODEL_TENSOR.TIME_MIX_DECAY_W2,
                            gguf.MODEL_TENSOR.TIME_MIX_LERP_FUSED,
                            gguf.MODEL_TENSOR.POSNET_NORM1,
                            gguf.MODEL_TENSOR.POSNET_NORM2,
                            gguf.MODEL_TENSOR.V_ENC_EMBD_POS,
                            gguf.MODEL_TENSOR.A_ENC_EMBD_POS,
                        )
                    )
                    or not new_name.endswith(".weight")
                ):
                    data_qtype = gguf.GGMLQuantizationType.F32

                if data_qtype is False and any(
                    self.match_model_tensor_name(new_name, key, bid)
                    for key in (
                        gguf.MODEL_TENSOR.TOKEN_EMBD,
                        gguf.MODEL_TENSOR.OUTPUT,
                    )
                ):
                    if self.ftype in (
                        gguf.LlamaFileType.MOSTLY_TQ1_0,
                        gguf.LlamaFileType.MOSTLY_TQ2_0,
                    ):
                        # TODO: use Q4_K and Q6_K
                        data_qtype = gguf.GGMLQuantizationType.F16

                # No override (data_qtype is False), or wants to be quantized (data_qtype is True)
                if isinstance(data_qtype, bool):
                    if self.ftype == gguf.LlamaFileType.ALL_F32:
                        data_qtype = gguf.GGMLQuantizationType.F32
                    elif self.ftype == gguf.LlamaFileType.MOSTLY_F16:
                        data_qtype = gguf.GGMLQuantizationType.F16
                    elif self.ftype == gguf.LlamaFileType.MOSTLY_BF16:
                        data_qtype = gguf.GGMLQuantizationType.BF16
                    elif self.ftype == gguf.LlamaFileType.MOSTLY_Q8_0:
                        data_qtype = gguf.GGMLQuantizationType.Q8_0
                    elif self.ftype == gguf.LlamaFileType.MOSTLY_TQ1_0:
                        data_qtype = gguf.GGMLQuantizationType.TQ1_0
                    elif self.ftype == gguf.LlamaFileType.MOSTLY_TQ2_0:
                        data_qtype = gguf.GGMLQuantizationType.TQ2_0
                    else:
                        raise ValueError(f"Unknown file type: {self.ftype.name}")

                try:
                    data = gguf.quants.quantize(data, data_qtype)
                except gguf.QuantError as e:
                    logger.warning("%s, %s", e, "falling back to F16")
                    data_qtype = gguf.GGMLQuantizationType.F16
                    data = gguf.quants.quantize(data, data_qtype)

                shape = gguf.quant_shape_from_byte_shape(data.shape, data_qtype) if data.dtype == np.uint8 else data.shape

                # reverse shape to make it similar to the internal ggml dimension order
                shape_str = f"{{{', '.join(str(n) for n in reversed(shape))}}}"

                # n_dims is implicit in the shape
                logger.info(f"{f'%-{max_name_len}s' % f'{new_name},'} {old_dtype} --> {data_qtype.name}, shape = {shape_str}")

                self.gguf_writer.add_tensor(new_name, data, raw_dtype=data_qtype)
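
    # The write path: write() runs prepare_tensors() and prepare_metadata()
    # (which in turn calls set_type() and set_gguf_parameters()), then streams
    # the header, KV metadata, and tensor data to self.fname_out.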
    def set_type(self):
        self.gguf_writer.add_type(gguf.GGUFType.MODEL)

    def prepare_metadata(self, vocab_only: bool):
        total_params, shared_params, expert_params, expert_count = self.gguf_writer.get_total_parameter_count()

        self.metadata = gguf.Metadata.load(self.metadata_override, self.dir_model_card, self.model_name, total_params)

        # If we are using HF model id, set the metadata name to the model id
        if self.remote_hf_model_id:
            self.metadata.name = self.remote_hf_model_id

        # Fallback to model directory name if metadata name is still missing
        if self.metadata.name is None:
            self.metadata.name = self.dir_model.name

        # Generate parameter weight class (useful for leaderboards) if not yet determined
        if self.metadata.size_label is None and total_params > 0:
            self.metadata.size_label = gguf.size_label(total_params, shared_params, expert_params, expert_count)

        self.set_type()

        logger.info("Set meta model")
        self.metadata.set_gguf_meta_model(self.gguf_writer)

        logger.info("Set model parameters")
        self.set_gguf_parameters()

        logger.info("Set model quantization version")
        self.gguf_writer.add_quantization_version(gguf.GGML_QUANT_VERSION)

    def write_vocab(self):
        raise NotImplementedError("write_vocab() must be implemented in subclasses")

    def write(self):
        self.prepare_tensors()
        self.prepare_metadata(vocab_only=False)
        self.gguf_writer.write_header_to_file(path=self.fname_out)
        self.gguf_writer.write_kv_data_to_file()
        self.gguf_writer.write_tensors_to_file(progress=True)
        self.gguf_writer.close()

    @staticmethod
    def get_model_part_names(dir_model: Path, prefix: str, suffix: str) -> list[str]:
        part_names: list[str] = []
        for filename in os.listdir(dir_model):
            if filename.startswith(prefix) and filename.endswith(suffix):
                part_names.append(filename)

        part_names.sort()

        return part_names

    @staticmethod
    def load_hparams(dir_model: Path):
        try:
            # for security reasons, we don't allow loading remote code by default
            # if a model needs remote code, we fall back to config.json
            config = AutoConfig.from_pretrained(dir_model, trust_remote_code=False).to_dict()
        except Exception as e:
            logger.warning(f"Failed to load model config from {dir_model}: {e}")
            logger.warning("Trying to load config.json instead")
            with open(dir_model / "config.json", "r", encoding="utf-8") as f:
                config = json.load(f)

        if "llm_config" in config:
            # rename for InternVL
            config["text_config"] = config["llm_config"]
        if "thinker_config" in config:
            # rename for Qwen2.5-Omni
            config["text_config"] = config["thinker_config"]["text_config"]
        return config

    @classmethod
    def register(cls, *names: str) -> Callable[[AnyModel], AnyModel]:
        assert names

        def func(modelcls: AnyModel) -> AnyModel:
            model_type = ModelType.MMPROJ if modelcls.model_arch == gguf.MODEL_ARCH.MMPROJ else ModelType.TEXT
            for name in names:
                cls._model_classes[model_type][name] = modelcls
            return modelcls
        return func

    @classmethod
    def print_registered_models(cls):
        for model_type, model_classes in cls._model_classes.items():
            logger.error(f"{model_type.name} models:")
            for name in sorted(model_classes.keys()):
                logger.error(f" - {name}")

    @classmethod
    def from_model_architecture(cls, arch: str, model_type=ModelType.TEXT) -> type[ModelBase]:
        try:
            return cls._model_classes[model_type][arch]
        except KeyError:
            raise NotImplementedError(f'Architecture {arch!r} not supported!') from None
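

# Illustrative registration flow (the concrete model classes later in this file
# follow this pattern; the names below are just an example):
#
#   @ModelBase.register("LlamaForCausalLM")
#   class LlamaModel(TextModel):
#       model_arch = gguf.MODEL_ARCH.LLAMA
#
# ModelBase.from_model_architecture("LlamaForCausalLM") then resolves the
# `architectures` entry from config.json back to the registered class.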
class TextModel(ModelBase):
    model_type = ModelType.TEXT
    hf_arch: str

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.hf_arch = get_model_architecture(self.hparams, self.model_type)

        if "text_config" in self.hparams:
            # move the text_config to the root level
            self.hparams = {**self.hparams, **self.hparams["text_config"]}

        self.block_count = self.find_hparam(["n_layers", "num_hidden_layers", "n_layer", "num_layers"])
        self.tensor_map = gguf.get_tensor_name_map(self.model_arch, self.block_count)

    @classmethod
    def __init_subclass__(cls):
        # can't use an abstract property, because overriding it without type errors
        # would require using decorated functions instead of simply defining the property
        if "model_arch" not in cls.__dict__:
            raise TypeError(f"Missing property 'model_arch' for {cls.__name__!r}")

    def set_vocab(self):
        self._set_vocab_gpt2()

    def prepare_metadata(self, vocab_only: bool):
        super().prepare_metadata(vocab_only=vocab_only)

        total_params = self.gguf_writer.get_total_parameter_count()[0]
        # Extract the encoding scheme from the file type name. e.g. 'gguf.LlamaFileType.MOSTLY_Q8_0' --> 'Q8_0'
        output_type: str = self.ftype.name.partition("_")[2]

        # Filename Output
        if self.fname_out.is_dir():
            # Generate default filename based on model specification and available metadata
            if not vocab_only:
                fname_default: str = gguf.naming_convention(self.metadata.name, self.metadata.basename, self.metadata.finetune, self.metadata.version, self.metadata.size_label, output_type, model_type="LoRA" if total_params < 0 else None)
            else:
                fname_default: str = gguf.naming_convention(self.metadata.name, self.metadata.basename, self.metadata.finetune, self.metadata.version, size_label=None, output_type=None, model_type="vocab")

            # Use the default filename
            self.fname_out = self.fname_out / f"{fname_default}.gguf"
        else:
            # Output path is a custom defined templated filename
            # Note: `not is_dir()` is used because `.is_file()` will not detect
            #       file template strings as it doesn't actually exist as a file
            # Process templated file name with the output ftype, useful with the "auto" ftype
            self.fname_out = self.fname_out.parent / gguf.fill_templated_filename(self.fname_out.name, output_type)

        logger.info("Set model tokenizer")
        self.set_vocab()
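
    # set_gguf_parameters() copies common config.json fields into arch-prefixed
    # GGUF keys (e.g. "max_position_embeddings" -> "{arch}.context_length",
    # "hidden_size" -> "{arch}.embedding_length"); every field is written only
    # if present, so unrelated architectures can share this default.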
    def set_gguf_parameters(self):
        self.gguf_writer.add_block_count(self.block_count)

        if (n_ctx := self.find_hparam(["max_position_embeddings", "n_ctx", "n_positions", "max_length"], optional=True)) is not None:
            self.gguf_writer.add_context_length(n_ctx)
            logger.info(f"gguf: context length = {n_ctx}")

        if (n_embd := self.find_hparam(["hidden_size", "n_embd", "dim"], optional=True)) is not None:
            self.gguf_writer.add_embedding_length(n_embd)
            logger.info(f"gguf: embedding length = {n_embd}")

        if (n_ff := self.find_hparam(["intermediate_size", "n_inner", "hidden_dim"], optional=True)) is not None:
            self.gguf_writer.add_feed_forward_length(n_ff)
            logger.info(f"gguf: feed forward length = {n_ff}")

        if (n_head := self.find_hparam(["num_attention_heads", "n_head", "n_heads"], optional=True)) is not None:
            self.gguf_writer.add_head_count(n_head)
            logger.info(f"gguf: head count = {n_head}")

        if (n_head_kv := self.hparams.get("num_key_value_heads")) is not None:
            self.gguf_writer.add_head_count_kv(n_head_kv)
            logger.info(f"gguf: key-value head count = {n_head_kv}")

        if (rope_theta := self.hparams.get("rope_theta")) is not None:
            self.gguf_writer.add_rope_freq_base(rope_theta)
            logger.info(f"gguf: rope theta = {rope_theta}")

        if (f_rms_eps := self.hparams.get("rms_norm_eps")) is not None:
            self.gguf_writer.add_layer_norm_rms_eps(f_rms_eps)
            logger.info(f"gguf: rms norm epsilon = {f_rms_eps}")

        if (f_norm_eps := self.find_hparam(["layer_norm_eps", "layer_norm_epsilon", "norm_epsilon"], optional=True)) is not None:
            self.gguf_writer.add_layer_norm_eps(f_norm_eps)
            logger.info(f"gguf: layer norm epsilon = {f_norm_eps}")

        if (n_experts := self.hparams.get("num_local_experts")) is not None:
            self.gguf_writer.add_expert_count(n_experts)
            logger.info(f"gguf: expert count = {n_experts}")

        if (n_experts_used := self.hparams.get("num_experts_per_tok")) is not None:
            self.gguf_writer.add_expert_used_count(n_experts_used)
            logger.info(f"gguf: experts used count = {n_experts_used}")

        if (head_dim := self.hparams.get("head_dim")) is not None:
            # Workaround for incorrect AutoConfig value for DeepSeekV3 (is set correctly in DeepSeekV2Model class)
            # https://github.com/huggingface/transformers/blob/19224c3642705c5b6988c9f5f4251f83323d05ae/src/transformers/models/deepseek_v3/configuration_deepseek_v3.py#L210
            if self.hparams.get("model_type") != "deepseek_v3":
                self.gguf_writer.add_key_length(head_dim)
                self.gguf_writer.add_value_length(head_dim)

        self.gguf_writer.add_file_type(self.ftype)
        logger.info(f"gguf: file type = {self.ftype}")

    def write_vocab(self):
        if len(self.gguf_writer.tensors) != 1:
            raise ValueError('Splitting the vocabulary is not supported')

        self.prepare_metadata(vocab_only=True)
        self.gguf_writer.write_header_to_file(path=self.fname_out)
        self.gguf_writer.write_kv_data_to_file()
        self.gguf_writer.close()

    def does_token_look_special(self, token: str | bytes) -> bool:
        if isinstance(token, (bytes, bytearray)):
            token_text = token.decode(encoding="utf-8")
        elif isinstance(token, memoryview):
            token_text = token.tobytes().decode(encoding="utf-8")
        else:
            token_text = token

        # Some models mark some added tokens which ought to be control tokens as not special.
        # (e.g. command-r, command-r-plus, deepseek-coder, gemma{,-2})
        seems_special = token_text in (
            "<pad>",  # deepseek-coder
            "<mask>", "<2mass>", "[@BOS@]",  # gemma{,-2}
        )

        seems_special = seems_special or (token_text.startswith("<|") and token_text.endswith("|>"))
        seems_special = seems_special or (token_text.startswith("<｜") and token_text.endswith("｜>"))  # deepseek-coder

        # TODO: should these be marked as UNUSED instead? (maybe not)
        seems_special = seems_special or (token_text.startswith("<unused") and token_text.endswith(">"))  # gemma{,-2}

        return seems_special

    # used for GPT-2 BPE and WordPiece vocabs
    def get_vocab_base(self) -> tuple[list[str], list[int], str]:
        tokens: list[str] = []
        toktypes: list[int] = []

        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(self.dir_model)
        vocab_size = self.hparams.get("vocab_size", len(tokenizer.vocab))
        assert max(tokenizer.vocab.values()) < vocab_size

        tokpre = self.get_vocab_base_pre(tokenizer)

        reverse_vocab = {id_: encoded_tok for encoded_tok, id_ in tokenizer.vocab.items()}
        added_vocab = tokenizer.get_added_vocab()
        added_tokens_decoder = tokenizer.added_tokens_decoder

        for i in range(vocab_size):
            if i not in reverse_vocab:
                tokens.append(f"[PAD{i}]")
                toktypes.append(gguf.TokenType.UNUSED)
            else:
                token: str = reverse_vocab[i]
                if token in added_vocab:
                    # The tokenizer in llama.cpp assumes the CONTROL and USER_DEFINED tokens are pre-normalized.
                    # To avoid unexpected issues - we make sure to normalize non-normalized tokens
                    if not added_tokens_decoder[i].normalized:
                        previous_token = token
                        token = tokenizer.decode(tokenizer.encode(token, add_special_tokens=False))
                        if previous_token != token:
                            logger.info(f"{repr(previous_token)} is encoded and decoded back to {repr(token)} using AutoTokenizer")

                    if added_tokens_decoder[i].special or self.does_token_look_special(token):
                        toktypes.append(gguf.TokenType.CONTROL)
                    else:
                        # NOTE: this was added for Gemma.
                        # Encoding and decoding the tokens above isn't sufficient for this case.
                        token = token.replace(b"\xe2\x96\x81".decode("utf-8"), " ")  # pre-normalize user-defined spaces
                        toktypes.append(gguf.TokenType.USER_DEFINED)
                else:
                    toktypes.append(gguf.TokenType.NORMAL)
                tokens.append(token)

        return tokens, toktypes, tokpre

    # NOTE: this function is generated by convert_hf_to_gguf_update.py
    #       do not modify it manually!
    # ref:  https://github.com/ggml-org/llama.cpp/pull/6920
    # Marker: Start get_vocab_base_pre
    def get_vocab_base_pre(self, tokenizer) -> str:
        # encoding this string and hashing the resulting tokens would (hopefully) give us a unique identifier that
        # is specific for the BPE pre-tokenizer used by the model
        # we will use this unique identifier to write a "tokenizer.ggml.pre" entry in the GGUF file which we can
        # use in llama.cpp to implement the same pre-tokenizer

        chktxt = '\n \n\n \n\n\n \t \t\t \t\n  \n   \n    \n     \n🚀 (normal) 😶\u200d🌫️ (multiple emojis concatenated) ✅ 🦙🦙 3 33 333 3333 33333 333333 3333333 33333333 3.3 3..3 3...3 កាន់តែពិសេសអាច😁 ?我想在apple工作1314151天～ ------======= нещо на Български \'\'\'\'\'\'```````\"\"\"\"......!!!!!!?????? I\'ve been \'told he\'s there, \'RE you sure? \'M not sure I\'ll make it, \'D you like some tea? We\'Ve a\'lL'
  537. chktok = tokenizer.encode(chktxt)
  538. chkhsh = sha256(str(chktok).encode()).hexdigest()
  539. logger.debug(f"chktok: {chktok}")
  540. logger.debug(f"chkhsh: {chkhsh}")
  541. res = None
  542. # NOTE: if you get an error here, you need to update the convert_hf_to_gguf_update.py script
  543. # or pull the latest version of the model from Huggingface
  544. # don't edit the hashes manually!
        if chkhsh == "0ef9807a4087ebef797fc749390439009c3b9eda9ad1a097abbe738f486c01e5":
            # ref: https://huggingface.co/meta-llama/Meta-Llama-3-8B
            res = "llama-bpe"
        if chkhsh == "049ecf7629871e3041641907f3de7c733e4dbfdc736f57d882ba0b0845599754":
            # ref: https://huggingface.co/deepseek-ai/deepseek-llm-7b-base
            res = "deepseek-llm"
        if chkhsh == "347715f544604f9118bb75ed199f68779f423cabb20db6de6f31b908d04d7821":
            # ref: https://huggingface.co/deepseek-ai/deepseek-coder-6.7b-base
            res = "deepseek-coder"
        if chkhsh == "8aeee3860c56296a157a1fe2fad249ec40aa59b1bb5709f4ade11c4e6fe652ed":
            # ref: https://huggingface.co/tiiuae/falcon-7b
            res = "falcon"
        if chkhsh == "0876d13b50744004aa9aeae05e7b0647eac9d801b5ba4668afc01e709c15e19f":
            # ref: https://huggingface.co/BAAI/bge-small-en-v1.5
            res = "bert-bge"
        if chkhsh == "9d032fcbd5501f4a38150912590928bfb36091efb5df11b8e2124b0390e3fb1e":
            # ref: https://huggingface.co/tiiuae/Falcon3-7B-Base
            res = "falcon3"
        if chkhsh == "8e62295832751ca1e8f92f2226f403dea30dc5165e448b5bfa05af5340c64ec7":
            # ref: https://huggingface.co/BAAI/bge-large-zh-v1.5
            res = "bert-bge-large"
        if chkhsh == "b6dc8df998e1cfbdc4eac8243701a65afe638679230920b50d6f17d81c098166":
            # ref: https://huggingface.co/mosaicml/mpt-7b
            res = "mpt"
        if chkhsh == "35d91631860c815f952d711435f48d356ebac988362536bed955d43bfa436e34":
            # ref: https://huggingface.co/bigcode/starcoder2-3b
            res = "starcoder"
        if chkhsh == "3ce83efda5659b07b1ad37ca97ca5797ea4285d9b9ab0dc679e4a720c9da7454":
            # ref: https://huggingface.co/openai-community/gpt2
            res = "gpt-2"
        if chkhsh == "32d85c31273f8019248f2559fed492d929ea28b17e51d81d3bb36fff23ca72b3":
            # ref: https://huggingface.co/stabilityai/stablelm-2-zephyr-1_6b
            res = "stablelm2"
        if chkhsh == "6221ad2852e85ce96f791f476e0b390cf9b474c9e3d1362f53a24a06dc8220ff":
            # ref: https://huggingface.co/smallcloudai/Refact-1_6-base
            res = "refact"
        if chkhsh == "9c2227e4dd922002fb81bde4fc02b0483ca4f12911410dee2255e4987644e3f8":
            # ref: https://huggingface.co/CohereForAI/c4ai-command-r-v01
            res = "command-r"
        if chkhsh == "e636dc30a262dcc0d8c323492e32ae2b70728f4df7dfe9737d9f920a282b8aea":
            # ref: https://huggingface.co/Qwen/Qwen1.5-7B
            res = "qwen2"
        if chkhsh == "b6dc8df998e1cfbdc4eac8243701a65afe638679230920b50d6f17d81c098166":
            # ref: https://huggingface.co/allenai/OLMo-1.7-7B-hf
            res = "olmo"
        if chkhsh == "a8594e3edff7c29c003940395316294b2c623e09894deebbc65f33f1515df79e":
            # ref: https://huggingface.co/databricks/dbrx-base
            res = "dbrx"
        if chkhsh == "c7699093ba4255a91e702aa38a596aa81669f3525dae06c2953267dde580f448":
            # ref: https://huggingface.co/jinaai/jina-reranker-v1-tiny-en
            res = "jina-v1-en"
        if chkhsh == "0876d13b50744004aa9aeae05e7b0647eac9d801b5ba4668afc01e709c15e19f":
            # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-en
            res = "jina-v2-en"
        if chkhsh == "171aeeedd6fb548d418a7461d053f11b6f1f1fc9b387bd66640d28a4b9f5c643":
            # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-es
            res = "jina-v2-es"
        if chkhsh == "27949a2493fc4a9f53f5b9b029c82689cfbe5d3a1929bb25e043089e28466de6":
            # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-de
            res = "jina-v2-de"
        if chkhsh == "c136ed14d01c2745d4f60a9596ae66800e2b61fa45643e72436041855ad4089d":
            # ref: https://huggingface.co/abacusai/Smaug-Llama-3-70B-Instruct
            res = "smaug-bpe"
        if chkhsh == "c7ea5862a53e4272c035c8238367063e2b270d51faa48c0f09e9d5b54746c360":
            # ref: https://huggingface.co/LumiOpen/Poro-34B-chat
            res = "poro-chat"
        if chkhsh == "7967bfa498ade6b757b064f31e964dddbb80f8f9a4d68d4ba7998fcf281c531a":
            # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-code
            res = "jina-v2-code"
        if chkhsh == "7fc505bd3104ca1083b150b17d088b59534ede9bde81f0dd2090967d7fe52cee":
            # ref: https://huggingface.co/LumiOpen/Viking-7B
            res = "viking"
        if chkhsh == "b53802fb28e26d645c3a310b34bfe07da813026ec7c7716883404d5e0f8b1901":
            # ref: https://huggingface.co/core42/jais-13b
            res = "jais"
        if chkhsh == "7b3e7548e4308f52a76e8229e4e6cc831195d0d1df43aed21ac6c93da05fec5f":
            # ref: https://huggingface.co/WisdomShell/CodeShell-7B
            res = "codeshell"
        if chkhsh == "63b97e4253352e6f357cc59ea5b583e3a680eaeaf2632188c2b952de2588485e":
            # ref: https://huggingface.co/mistralai/Mistral-Nemo-Base-2407
            res = "tekken"
        if chkhsh == "855059429035d75a914d1eda9f10a876752e281a054a7a3d421ef0533e5b6249":
            # ref: https://huggingface.co/HuggingFaceTB/SmolLM-135M
            res = "smollm"
        if chkhsh == "3c30d3ad1d6b64202cd222813e7736c2db6e1bd6d67197090fc1211fbc612ae7":
            # ref: https://huggingface.co/bigscience/bloom
            res = "bloom"
        if chkhsh == "bc01ce58980e1db43859146dc51b1758b3b88729b217a74792e9f8d43e479d21":
            # ref: https://huggingface.co/TurkuNLP/gpt3-finnish-small
            res = "gpt3-finnish"
        if chkhsh == "4e2b24cc4770243d65a2c9ec19770a72f08cffc161adbb73fcbb6b7dd45a0aae":
            # ref: https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct
            res = "exaone"
        if chkhsh == "fcace8b9cac38ce847670c970cd5892031a753a1ef381abd1d9af00f713da085":
            # ref: https://huggingface.co/microsoft/phi-2
            res = "phi-2"
        if chkhsh == "60824e3c0d9401f89943cbb2fff727f0e2d4c545ba4df2d6e4f09a6db0f5b450":
            # ref: https://huggingface.co/facebook/chameleon-7b
            res = "chameleon"
        if chkhsh == "8b5a93ed704057481f240da0be7e7dca721d7f8f4755263b6807227a2cbeae65":
            # ref: https://huggingface.co/sentence-transformers/stsb-roberta-base
            res = "roberta-bpe"
        if chkhsh == "ad851be1dba641f2e3711822f816db2c265f788b37c63b4e1aeacb9ee92de8eb":
            # ref: https://huggingface.co/ai-sage/GigaChat-20B-A3B-instruct
            res = "gigachat"
        if chkhsh == "d4c8f286ea6b520b3d495c4455483cfa2302c0cfcd4be05d781b6a8a0a7cdaf1":
            # ref: https://huggingface.co/Infinigence/Megrez-3B-Instruct
            res = "megrez"
        if chkhsh == "877081d19cf6996e2c4ff0e1236341e9b7bde288f5311a56a937f0afbbb3aeb5":
            # ref: https://huggingface.co/deepseek-ai/DeepSeek-V3
            res = "deepseek-v3"
        if chkhsh == "b3f499bb4255f8ca19fccd664443283318f2fd2414d5e0b040fbdd0cc195d6c5":
            # ref: https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B
            res = "deepseek-r1-qwen"
        if chkhsh == "ccc2ef013c104be7bae2965776d611e1d7a8a2a9c547dd93a682c9a9fc80352e":
            # ref: https://huggingface.co/Xenova/gpt-4o
            res = "gpt-4o"
        if chkhsh == "7dec86086fcc38b66b7bc1575a160ae21cf705be7718b9d5598190d7c12db76f":
            # ref: https://huggingface.co/UW/OLMo2-8B-SuperBPE-t180k
            res = "superbpe"
        if chkhsh == "1994ffd01900cfb37395608534236ecd63f2bd5995d6cb1004dda1af50240f15":
            # ref: https://huggingface.co/trillionlabs/Trillion-7B-preview
            res = "trillion"
        if chkhsh == "96a5f08be6259352137b512d4157e333e21df7edd3fcd152990608735a65b224":
            # ref: https://huggingface.co/inclusionAI/Ling-lite
            res = "bailingmoe"
        if chkhsh == "d353350c764d8c3b39c763113960e4fb4919bea5fbf208a0e3b22e8469dc7406":
            # ref: https://huggingface.co/meta-llama/Llama-4-Scout-17B-16E-Instruct
            res = "llama4"
        if chkhsh == "0e9433cbbb161f89e264eb32e8e64bfe69e834973ffca5d41d3948a604a3e2a3":
            # ref: https://huggingface.co/mistral-community/pixtral-12b
            res = "pixtral"
        if chkhsh == "d5f1dd6f980fec569fb218a81a7658ac45fc56b38c5a0adeb1c232fbe04ef5ec":
            # ref: https://huggingface.co/ByteDance-Seed/Seed-Coder-8B-Base
            res = "seed-coder"
        if chkhsh == "b6e8e1518dc4305be2fe39c313ed643381c4da5db34a98f6a04c093f8afbe99b":
            # ref: https://huggingface.co/THUDM/glm-4-9b-chat
            res = "chatglm-bpe"
        if chkhsh == "81d72c7348a9f0ebe86f23298d37debe0a5e71149e29bd283904c02262b27516":
            # ref: https://huggingface.co/THUDM/glm-4-9b-chat
            res = "chatglm-bpe"
        if chkhsh == "a1336059768a55c99a734006ffb02203cd450fed003e9a71886c88acf24fdbc2":
            # ref: https://huggingface.co/THUDM/glm-4-9b-hf
            res = "glm4"
        if chkhsh == "1431a23e583c97432bc230bff598d103ddb5a1f89960c8f1d1051aaa944d0b35":
            # ref: https://huggingface.co/sapienzanlp/Minerva-7B-base-v1.0
            res = "minerva-7b"

        if res is None:
            logger.warning("\n")
            logger.warning("**************************************************************************************")
            logger.warning("** WARNING: The BPE pre-tokenizer was not recognized!")
            logger.warning("** There are 2 possible reasons for this:")
            logger.warning("** - the model has not been added to convert_hf_to_gguf_update.py yet")
            logger.warning("** - the pre-tokenization config has changed upstream")
            logger.warning("** Check your model files and convert_hf_to_gguf_update.py and update them accordingly.")
            logger.warning("** ref: https://github.com/ggml-org/llama.cpp/pull/6920")
            logger.warning("**")
            logger.warning(f"** chkhsh: {chkhsh}")
            logger.warning("**************************************************************************************")
            logger.warning("\n")
            raise NotImplementedError("BPE pre-tokenizer was not recognized - update get_vocab_base_pre()")

        logger.debug(f"tokenizer.ggml.pre: {repr(res)}")
        logger.debug(f"chkhsh: {chkhsh}")

        return res
        # Marker: End get_vocab_base_pre
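
    # The `if chkhsh == ...` entries above are generated by convert_hf_to_gguf_update.py
    # (the markers delimit the generated region). Illustrative shape of one entry --
    # do not add these by hand:
    #
    #     if chkhsh == "<checksum printed in the warning above>":
    #         # ref: https://huggingface.co/<org>/<model>
    #         res = "<pre-tokenizer name known to llama.cpp>"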

    def _set_vocab_none(self) -> None:
        self.gguf_writer.add_tokenizer_model("none")

    def _set_vocab_gpt2(self) -> None:
        tokens, toktypes, tokpre = self.get_vocab_base()
        self.gguf_writer.add_tokenizer_model("gpt2")
        self.gguf_writer.add_tokenizer_pre(tokpre)
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=True)
        special_vocab.add_to_gguf(self.gguf_writer)

    def _set_vocab_qwen(self):
        dir_model = self.dir_model
        hparams = self.hparams
        tokens: list[str] = []
        toktypes: list[int] = []

        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(dir_model, trust_remote_code=True)
        vocab_size = hparams["vocab_size"]
        assert max(tokenizer.get_vocab().values()) < vocab_size

        tokpre = self.get_vocab_base_pre(tokenizer)

        merges = []
        vocab = {}
        mergeable_ranks = tokenizer.mergeable_ranks
        for token, rank in mergeable_ranks.items():
            vocab[QwenModel.token_bytes_to_string(token)] = rank
            if len(token) == 1:
                continue
            merged = QwenModel.bpe(mergeable_ranks, token, max_rank=rank)
            assert len(merged) == 2
            merges.append(' '.join(map(QwenModel.token_bytes_to_string, merged)))

        # for this kind of tokenizer, added_vocab is not a subset of vocab, so they need to be combined
        added_vocab = tokenizer.special_tokens
        reverse_vocab = {id_ : encoded_tok for encoded_tok, id_ in {**vocab, **added_vocab}.items()}

        for i in range(vocab_size):
            if i not in reverse_vocab:
                tokens.append(f"[PAD{i}]")
                toktypes.append(gguf.TokenType.UNUSED)
            elif reverse_vocab[i] in added_vocab:
                tokens.append(reverse_vocab[i])
                toktypes.append(gguf.TokenType.CONTROL)
            else:
                tokens.append(reverse_vocab[i])
                toktypes.append(gguf.TokenType.NORMAL)

        self.gguf_writer.add_tokenizer_model("gpt2")
        self.gguf_writer.add_tokenizer_pre(tokpre)
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(dir_model, load_merges=False)
        special_vocab.merges = merges
        # only add special tokens when they were not already loaded from config.json
        if len(special_vocab.special_token_ids) == 0:
            special_vocab._set_special_token("bos", tokenizer.special_tokens["<|endoftext|>"])
            special_vocab._set_special_token("eos", tokenizer.special_tokens["<|endoftext|>"])
        # this one is usually not in config.json anyway
        special_vocab._set_special_token("unk", tokenizer.special_tokens["<|endoftext|>"])
        special_vocab.add_to_gguf(self.gguf_writer)
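
    # A worked example of the merge recovery above (illustrative values): if
    # mergeable_ranks contains {b"a": 0, b"b": 1, b"ab": 2}, then for token b"ab"
    # QwenModel.bpe() re-splits it into (b"a", b"b") using only lower-ranked
    # entries, and the recorded merge line becomes "a b". Single-byte tokens are
    # skipped since they cannot be the result of a merge.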

    def _set_vocab_sentencepiece(self, add_to_gguf=True):
        tokens, scores, toktypes = self._create_vocab_sentencepiece()

        self.gguf_writer.add_tokenizer_model("llama")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

    def _create_vocab_sentencepiece(self):
        from sentencepiece import SentencePieceProcessor

        tokenizer_path = self.dir_model / 'tokenizer.model'

        if not tokenizer_path.is_file():
            raise FileNotFoundError(f"File not found: {tokenizer_path}")

        tokenizer = SentencePieceProcessor()
        tokenizer.LoadFromFile(str(tokenizer_path))

        vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())

        tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
        scores: list[float] = [-10000.0] * vocab_size
        toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size

        for token_id in range(tokenizer.vocab_size()):
            piece = tokenizer.IdToPiece(token_id)
            text = piece.encode("utf-8")
            score = tokenizer.GetScore(token_id)

            toktype = SentencePieceTokenTypes.NORMAL
            if tokenizer.IsUnknown(token_id):
                toktype = SentencePieceTokenTypes.UNKNOWN
            elif tokenizer.IsControl(token_id):
                toktype = SentencePieceTokenTypes.CONTROL
            elif tokenizer.IsUnused(token_id):
                toktype = SentencePieceTokenTypes.UNUSED
            elif tokenizer.IsByte(token_id):
                toktype = SentencePieceTokenTypes.BYTE

            tokens[token_id] = text
            scores[token_id] = score
            toktypes[token_id] = toktype

        added_tokens_file = self.dir_model / 'added_tokens.json'
        if added_tokens_file.is_file():
            with open(added_tokens_file, "r", encoding="utf-8") as f:
                added_tokens_json = json.load(f)
                for key in added_tokens_json:
                    token_id = added_tokens_json[key]
                    if token_id >= vocab_size:
                        logger.warning(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
                        continue

                    tokens[token_id] = key.encode("utf-8")
                    scores[token_id] = -1000.0
                    toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED

        tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
        if tokenizer_config_file.is_file():
            with open(tokenizer_config_file, "r", encoding="utf-8") as f:
                tokenizer_config_json = json.load(f)
                added_tokens_decoder = tokenizer_config_json.get("added_tokens_decoder", {})
                for token_id, token_data in added_tokens_decoder.items():
                    token_id = int(token_id)
                    token: str = token_data["content"]
                    if token_id >= vocab_size:
                        logger.warning(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
                        continue

                    if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
                        if tokens[token_id] != token.encode("utf-8"):
                            logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token!r}')

                    if token_data.get("special") or self.does_token_look_special(token):
                        toktypes[token_id] = SentencePieceTokenTypes.CONTROL
                    else:
                        token = token.replace(b"\xe2\x96\x81".decode("utf-8"), " ")  # pre-normalize user-defined spaces
                        toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED

                    scores[token_id] = -1000.0
                    tokens[token_id] = token.encode("utf-8")

        if vocab_size > len(tokens):
            pad_count = vocab_size - len(tokens)
            logger.debug(f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]")
            for i in range(1, pad_count + 1):
                tokens.append(bytes(f"[PAD{i}]", encoding="utf-8"))
                scores.append(-1000.0)
                toktypes.append(SentencePieceTokenTypes.UNUSED)

        return tokens, scores, toktypes
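
    # Precedence in _create_vocab_sentencepiece, lowest to highest: pieces from
    # tokenizer.model, then added_tokens.json, then tokenizer_config.json's
    # "added_tokens_decoder" (which may also retype a slot to CONTROL or
    # USER_DEFINED). Any ids never filled remain "[PADi]"/UNUSED placeholders.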

    def _set_vocab_llama_hf(self):
        vocab = gguf.LlamaHfVocab(self.dir_model)
        tokens = []
        scores = []
        toktypes = []

        for text, score, toktype in vocab.all_tokens():
            tokens.append(text)
            scores.append(score)
            toktypes.append(toktype)

        assert len(tokens) == vocab.vocab_size

        self.gguf_writer.add_tokenizer_model("llama")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

    def _set_vocab_rwkv_world(self):
        assert (self.dir_model / "rwkv_vocab_v20230424.txt").is_file()
        vocab_size = self.hparams.get("vocab_size", 65536)

        tokens: list[bytes] = ['<s>'.encode("utf-8")]
        toktypes: list[int] = [gguf.TokenType.CONTROL]

        with open(self.dir_model / "rwkv_vocab_v20230424.txt", "r", encoding="utf-8") as f:
            lines = f.readlines()
            for line in lines:
                parts = line.split(' ')
                assert len(parts) >= 3
                token, token_len = ast.literal_eval(' '.join(parts[1:-1])), int(parts[-1])
                token = token.encode("utf-8") if isinstance(token, str) else token
                assert isinstance(token, bytes)
                assert len(token) == token_len
                token_text: str = repr(token)[2:-1]  # "b'\xff'" -> "\xff"
                tokens.append(token_text.encode("utf-8"))
                toktypes.append(gguf.TokenType.NORMAL)
        remainder = vocab_size - len(tokens)
        assert remainder >= 0
        for i in range(len(tokens), vocab_size):
            tokens.append(f"[PAD{i}]".encode("utf-8"))
            toktypes.append(gguf.TokenType.UNUSED)

        self.gguf_writer.add_tokenizer_model("rwkv")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)
        special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=False)
        special_vocab.chat_template = "rwkv-world"
        # hack: Add '\n\n' as the EOT token to make it chat normally
        special_vocab._set_special_token("eot", 261)
        # hack: Override these as they have already been set (incorrectly)
        special_vocab.special_token_ids["bos"] = 0
        special_vocab.special_token_ids["eos"] = 0
        special_vocab.add_to_gguf(self.gguf_writer)
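
    # As parsed above, each line of rwkv_vocab_v20230424.txt is
    # "<index> <token literal> <byte length>"; an illustrative line such as
    #     261 '\n\n' 2
    # yields token b"\n\n" of length 2 -- which is why token id 261 can be
    # repurposed as the EOT token in the hack above.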

    def _set_vocab_builtin(self, model_name: Literal["gpt-neox", "llama-spm"], vocab_size: int):
        tokenizer_path = Path(sys.path[0]) / "models" / f"ggml-vocab-{model_name}.gguf"
        logger.warning(f"Using tokenizer from '{os.path.relpath(tokenizer_path, os.getcwd())}'")
        vocab_reader = gguf.GGUFReader(tokenizer_path, "r")

        default_pre = "mpt" if model_name == "gpt-neox" else "default"

        field = vocab_reader.get_field(gguf.Keys.Tokenizer.MODEL)
        assert field  # tokenizer model
        self.gguf_writer.add_tokenizer_model(bytes(field.parts[-1]).decode("utf-8"))

        field = vocab_reader.get_field(gguf.Keys.Tokenizer.PRE)
        self.gguf_writer.add_tokenizer_pre(bytes(field.parts[-1]).decode("utf-8") if field else default_pre)

        field = vocab_reader.get_field(gguf.Keys.Tokenizer.LIST)
        assert field  # token list
        self.gguf_writer.add_token_list([bytes(field.parts[i]) for i in field.data][:vocab_size])

        if model_name == "llama-spm":
            field = vocab_reader.get_field(gguf.Keys.Tokenizer.SCORES)
            assert field  # token scores
            self.gguf_writer.add_token_scores([field.parts[i].tolist()[0] for i in field.data][:vocab_size])

        field = vocab_reader.get_field(gguf.Keys.Tokenizer.TOKEN_TYPE)
        assert field  # token types
        self.gguf_writer.add_token_types([field.parts[i].tolist()[0] for i in field.data][:vocab_size])

        if model_name != "llama-spm":
            field = vocab_reader.get_field(gguf.Keys.Tokenizer.MERGES)
            assert field  # token merges
            self.gguf_writer.add_token_merges([bytes(field.parts[i]) for i in field.data])

        if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.BOS_ID)) is not None:
            self.gguf_writer.add_bos_token_id(field.parts[-1].tolist()[0])
        if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.EOS_ID)) is not None:
            self.gguf_writer.add_eos_token_id(field.parts[-1].tolist()[0])
        if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.UNK_ID)) is not None:
            self.gguf_writer.add_unk_token_id(field.parts[-1].tolist()[0])
        if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.PAD_ID)) is not None:
            self.gguf_writer.add_pad_token_id(field.parts[-1].tolist()[0])
        if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.ADD_BOS)) is not None:
            self.gguf_writer.add_add_bos_token(field.parts[-1].tolist()[0])
        if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.ADD_EOS)) is not None:
            self.gguf_writer.add_add_eos_token(field.parts[-1].tolist()[0])

    def _try_set_pooling_type(self) -> None:
        # get pooling path
        pooling_path = None
        module_path = self.dir_model / "modules.json"
        if module_path.is_file():
            with open(module_path, encoding="utf-8") as f:
                modules = json.load(f)
            for mod in modules:
                if mod["type"] == "sentence_transformers.models.Pooling":
                    pooling_path = mod["path"]
                    break

        # get pooling type
        if pooling_path is not None:
            with open(self.dir_model / pooling_path / "config.json", encoding="utf-8") as f:
                pooling = json.load(f)
            if pooling["pooling_mode_mean_tokens"]:
                pooling_type = gguf.PoolingType.MEAN
            elif pooling["pooling_mode_cls_token"]:
                pooling_type = gguf.PoolingType.CLS
            elif pooling["pooling_mode_lasttoken"]:
                pooling_type = gguf.PoolingType.LAST
            else:
                raise NotImplementedError("Only MEAN, CLS, and LAST pooling types supported")
            self.gguf_writer.add_pooling_type(pooling_type)
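
    # Typical sentence-transformers layout this reads (illustrative; the "1_Pooling"
    # path is just a common convention): modules.json contains
    #   [{"idx": 1, "name": "1", "path": "1_Pooling",
    #     "type": "sentence_transformers.models.Pooling"}, ...]
    # and <path>/config.json carries boolean flags such as
    # "pooling_mode_mean_tokens" / "pooling_mode_cls_token" / "pooling_mode_lasttoken".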


class MmprojModel(ModelBase):
    model_type = ModelType.MMPROJ
    model_arch = gguf.MODEL_ARCH.MMPROJ
    preprocessor_config: dict[str, Any]
    global_config: dict[str, Any]

    n_block_keys = ["n_layers", "num_hidden_layers", "n_layer", "num_layers", "depth"]

    has_vision_encoder: bool = True  # by default
    has_audio_encoder: bool = False

    # for models having multiple encoders, we need to separate their hparams
    hparams_vision: dict[str, Any] | None = None
    hparams_audio: dict[str, Any] | None = None

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.model_arch != gguf.MODEL_ARCH.MMPROJ:
            raise TypeError("MmprojModel must be subclassed with model_arch = gguf.MODEL_ARCH.MMPROJ")

        # get n_embd of the text model
        if "text_config" not in self.hparams:
            self.hparams["text_config"] = {}
        if "audio_config" not in self.hparams:
            self.hparams["audio_config"] = {}
        text_config = {**self.hparams, **self.hparams["text_config"]}
        self.n_embd_text = text_config.get("hidden_size", text_config.get("n_embd", 0))
        assert self.n_embd_text > 0, "n_embd not found in hparams"

        # move vision config to the top level, while preserving the original hparams in global_config
        import copy
        self.global_config = copy.deepcopy(self.hparams)
        self.hparams_vision = self.get_vision_config()
        self.hparams_audio = self.get_audio_config()

        if self.hparams_vision is None and self.hparams_audio is None:
            raise ValueError("vision_config / audio_config not found in hparams")

        # for compat with vision-only models
        self.hparams = self.hparams_vision or self.hparams_audio or self.hparams

        # TODO @ngxson : this is a hack to support both vision and audio encoders
        have_multiple_encoders = self.has_audio_encoder and self.has_vision_encoder
        self.block_count = 128 if have_multiple_encoders else self.find_hparam(self.n_block_keys, True)
        self.tensor_map = gguf.get_tensor_name_map(gguf.MODEL_ARCH.MMPROJ, self.block_count)

        # load preprocessor config
        with open(self.dir_model / "preprocessor_config.json", "r", encoding="utf-8") as f:
            self.preprocessor_config = json.load(f)

    def get_vision_config(self) -> dict[str, Any] | None:
        return self.global_config.get("vision_config")

    def get_audio_config(self) -> dict[str, Any] | None:
        return self.global_config.get("audio_config")

    def set_type(self):
        self.gguf_writer.add_type(gguf.GGUFType.MMPROJ)

    def set_gguf_parameters(self):
        self.gguf_writer.add_file_type(self.ftype)

        if self.has_vision_encoder:
            self.gguf_writer.add_clip_has_vision_encoder(True)
            self.gguf_writer.add_vision_projection_dim(self.n_embd_text)

            # vision config
            self.gguf_writer.add_vision_image_size(self.find_vparam(["image_size"]))
            self.gguf_writer.add_vision_patch_size(self.find_vparam(["patch_size"]))
            self.gguf_writer.add_vision_embedding_length(self.find_vparam(["hidden_size"]))
            self.gguf_writer.add_vision_feed_forward_length(self.find_vparam(["intermediate_size"]))
            self.gguf_writer.add_vision_block_count(self.find_vparam(self.n_block_keys))
            self.gguf_writer.add_vision_head_count(self.find_vparam(["num_attention_heads"]))

            # preprocessor config
            self.gguf_writer.add_vision_image_mean(self.preprocessor_config["image_mean"])
            self.gguf_writer.add_vision_image_std(self.preprocessor_config["image_std"])

        if self.has_audio_encoder:
            self.gguf_writer.add_clip_has_audio_encoder(True)
            self.gguf_writer.add_audio_projection_dim(self.n_embd_text)

            # audio config
            self.gguf_writer.add_audio_embedding_length(self.find_aparam(["hidden_size"]))
            self.gguf_writer.add_audio_feed_forward_length(self.find_aparam(["intermediate_size"]))
            self.gguf_writer.add_audio_block_count(self.find_aparam(self.n_block_keys))
            self.gguf_writer.add_audio_head_count(self.find_aparam(["num_attention_heads"]))

        if not self.has_vision_encoder and not self.has_audio_encoder:
            raise ValueError("MmprojModel must have either vision or audio encoder")

    def write_vocab(self):
        raise ValueError("MmprojModel does not support vocab writing")

    def find_vparam(self, keys: Iterable[str], optional: bool = False) -> Any:
        assert self.hparams_vision is not None
        return self._find_param(self.hparams_vision, keys, optional)

    def find_aparam(self, keys: Iterable[str], optional: bool = False) -> Any:
        assert self.hparams_audio is not None
        return self._find_param(self.hparams_audio, keys, optional)

    def _find_param(self, obj: dict[str, Any], keys: Iterable[str], optional: bool = False) -> Any:
        key = next((k for k in keys if k in obj), None)
        if key is not None:
            return obj[key]
        if optional:
            return None
        raise KeyError(f"could not find any of: {keys}")
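
# Illustrative config.json nesting that MmprojModel flattens (field names as used
# above; the values are made up):
#
#   {
#       "text_config":   {"hidden_size": 4096, ...},
#       "vision_config": {"hidden_size": 1152, "image_size": 384, "patch_size": 14, ...},
#       "audio_config":  {"hidden_size": 1280, ...}
#   }
#
# get_vision_config()/get_audio_config() pull the nested dicts out of global_config,
# and find_vparam()/find_aparam() then look keys up in the selected sub-config.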


@ModelBase.register("GPTNeoXForCausalLM")
class GPTNeoXModel(TextModel):
    model_arch = gguf.MODEL_ARCH.GPTNEOX

    def set_gguf_parameters(self):
        block_count = self.hparams["num_hidden_layers"]

        self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"])
        self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
        self.gguf_writer.add_rope_dimension_count(
            int(self.hparams["rotary_pct"] * (self.hparams["hidden_size"] // self.hparams["num_attention_heads"])),
        )
        self.gguf_writer.add_head_count(self.hparams["num_attention_heads"])
        self.gguf_writer.add_parallel_residual(self.hparams.get("use_parallel_residual", True))
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_eps"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads"))
        n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed"))

        tensors: list[tuple[str, Tensor]] = []

        if re.match(r"gpt_neox\.layers\.\d+\.attention\.query_key_value\.weight", name):
            # Map bloom-style qkv_linear to gpt-style qkv_linear
            # bloom: https://github.com/huggingface/transformers/blob/main/src/transformers/models/bloom/modeling_bloom.py#L238-L252  # noqa
            # gpt-2: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt2/modeling_gpt2.py#L312  # noqa
            qkv_weights = data_torch.reshape((n_head, 3, n_embed // n_head, n_embed))
            data_torch = torch.cat(
                (
                    qkv_weights[:, 0, :, :].reshape((-1, n_embed)),
                    qkv_weights[:, 1, :, :].reshape((-1, n_embed)),
                    qkv_weights[:, 2, :, :].reshape((-1, n_embed)),
                ),
                dim=0,
            )
            logger.info("re-format attention.linear_qkv.weight")
        elif re.match(r"gpt_neox\.layers\.\d+\.attention\.query_key_value\.bias", name):
            qkv_bias = data_torch.reshape((n_head, 3, n_embed // n_head))
            data_torch = torch.cat(
                (
                    qkv_bias[:, 0, :].reshape((n_embed,)),
                    qkv_bias[:, 1, :].reshape((n_embed,)),
                    qkv_bias[:, 2, :].reshape((n_embed,)),
                ),
                dim=0,
            )
            logger.info("re-format attention.linear_qkv.bias")

        tensors.append((self.map_tensor_name(name), data_torch))

        return tensors
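
# Shape sketch for the QKV re-format above (illustrative numbers): with
# n_embed = 512 and n_head = 8 (head_dim = 64), the fused weight (1536, 512) is
# viewed as (8, 3, 64, 512); slicing index 1 at 0/1/2 gathers all Q, then all K,
# then all V rows, and the final concat yields a (1536, 512) tensor in Q|K|V order.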


@ModelBase.register("BloomForCausalLM", "BloomModel")
class BloomModel(TextModel):
    model_arch = gguf.MODEL_ARCH.BLOOM

    def set_gguf_parameters(self):
        n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed"))
        n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads"))
        self.gguf_writer.add_context_length(self.hparams.get("seq_length", n_embed))
        self.gguf_writer.add_embedding_length(n_embed)
        self.gguf_writer.add_feed_forward_length(4 * n_embed)
        self.gguf_writer.add_block_count(self.hparams["n_layer"])
        self.gguf_writer.add_head_count(n_head)
        self.gguf_writer.add_head_count_kv(n_head)
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads"))
        n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed"))

        name = re.sub(r'transformer\.', '', name)

        tensors: list[tuple[str, Tensor]] = []

        if re.match(r"h\.\d+\.self_attention\.query_key_value\.weight", name):
            # Map bloom-style qkv_linear to gpt-style qkv_linear
            # bloom: https://github.com/huggingface/transformers/blob/main/src/transformers/models/bloom/modeling_bloom.py#L238-L252  # noqa
            # gpt-2: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt2/modeling_gpt2.py#L312  # noqa
            qkv_weights = data_torch.reshape((n_head, 3, n_embed // n_head, n_embed))
            data_torch = torch.cat(
                (
                    qkv_weights[:, 0, :, :].reshape((-1, n_embed)),
                    qkv_weights[:, 1, :, :].reshape((-1, n_embed)),
                    qkv_weights[:, 2, :, :].reshape((-1, n_embed)),
                ),
                dim=0,
            )
            logger.info("re-format attention.linear_qkv.weight")
        elif re.match(r"h\.\d+\.self_attention\.query_key_value\.bias", name):
            qkv_bias = data_torch.reshape((n_head, 3, n_embed // n_head))
            data_torch = torch.cat(
                (
                    qkv_bias[:, 0, :].reshape((n_embed,)),
                    qkv_bias[:, 1, :].reshape((n_embed,)),
                    qkv_bias[:, 2, :].reshape((n_embed,)),
                ),
                dim=0,
            )
            logger.info("re-format attention.linear_qkv.bias")

        tensors.append((self.map_tensor_name(name), data_torch))

        return tensors


@ModelBase.register("MPTForCausalLM")
class MPTModel(TextModel):
    model_arch = gguf.MODEL_ARCH.MPT

    def set_vocab(self):
        try:
            self._set_vocab_gpt2()
        except Exception:
            # Fallback for SEA-LION model
            self._set_vocab_sentencepiece()
            self.gguf_writer.add_add_bos_token(False)
            self.gguf_writer.add_pad_token_id(3)
            self.gguf_writer.add_eos_token_id(1)
            self.gguf_writer.add_unk_token_id(0)

    def set_gguf_parameters(self):
        block_count = self.hparams["n_layers"]
        self.gguf_writer.add_context_length(self.hparams["max_seq_len"])
        self.gguf_writer.add_embedding_length(self.hparams["d_model"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_feed_forward_length(4 * self.hparams["d_model"])
        self.gguf_writer.add_head_count(self.hparams["n_heads"])
        if kv_n_heads := self.hparams["attn_config"].get("kv_n_heads"):
            self.gguf_writer.add_head_count_kv(kv_n_heads)
        self.gguf_writer.add_layer_norm_eps(1e-5)
        if self.hparams["attn_config"]["clip_qkv"] is not None:
            self.gguf_writer.add_clamp_kqv(self.hparams["attn_config"]["clip_qkv"])
        if self.hparams["attn_config"]["alibi"]:
            self.gguf_writer.add_max_alibi_bias(self.hparams["attn_config"]["alibi_bias_max"])
        else:
            self.gguf_writer.add_max_alibi_bias(0.0)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        if "scales" in name:
            new_name = self.map_tensor_name(name, try_suffixes=(".weight", ".bias", ".scales"))
            new_name = new_name.replace("scales", "act.scales")
        else:
            new_name = self.map_tensor_name(name, try_suffixes=(".weight", ".bias"))

        return [(new_name, data_torch)]


@ModelBase.register("OrionForCausalLM")
class OrionModel(TextModel):
    model_arch = gguf.MODEL_ARCH.ORION

    def set_vocab(self):
        self._set_vocab_sentencepiece()

    def set_gguf_parameters(self):
        block_count = self.hparams["num_hidden_layers"]
        head_count = self.hparams["num_attention_heads"]
        head_count_kv = self.hparams.get("num_key_value_heads", head_count)

        ctx_length = 0
        if "max_sequence_length" in self.hparams:
            ctx_length = self.hparams["max_sequence_length"]
        elif "max_position_embeddings" in self.hparams:
            ctx_length = self.hparams["max_position_embeddings"]
        elif "model_max_length" in self.hparams:
            ctx_length = self.hparams["model_max_length"]
        else:
            raise ValueError("gguf: cannot find ctx length parameter.")

        self.gguf_writer.add_file_type(self.ftype)
        self.gguf_writer.add_tensor_data_layout("Meta AI original pth")
        self.gguf_writer.add_context_length(ctx_length)
        self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
        self.gguf_writer.add_head_count(head_count)
        self.gguf_writer.add_head_count_kv(head_count_kv)
        # note: config provides rms norm but it is actually layer norm
        # ref: https://huggingface.co/OrionStarAI/Orion-14B-Chat/blob/276a17221ce42beb45f66fac657a41540e71f4f5/modeling_orion.py#L570-L571
        self.gguf_writer.add_layer_norm_eps(self.hparams["rms_norm_eps"])


@ModelBase.register("BaichuanForCausalLM", "BaiChuanForCausalLM")
class BaichuanModel(TextModel):
    model_arch = gguf.MODEL_ARCH.BAICHUAN

    def set_vocab(self):
        self._set_vocab_sentencepiece()

    def set_gguf_parameters(self):
        block_count = self.hparams["num_hidden_layers"]
        head_count = self.hparams["num_attention_heads"]
        head_count_kv = self.hparams.get("num_key_value_heads", head_count)

        ctx_length = 0
        if "max_sequence_length" in self.hparams:
            ctx_length = self.hparams["max_sequence_length"]
        elif "max_position_embeddings" in self.hparams:
            ctx_length = self.hparams["max_position_embeddings"]
        elif "model_max_length" in self.hparams:
            ctx_length = self.hparams["model_max_length"]
        else:
            raise ValueError("gguf: cannot find ctx length parameter.")

        self.gguf_writer.add_tensor_data_layout("Meta AI original pth")
        self.gguf_writer.add_context_length(ctx_length)
        self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
        self.gguf_writer.add_rope_dimension_count(self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
        self.gguf_writer.add_head_count(head_count)
        self.gguf_writer.add_head_count_kv(head_count_kv)
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
        self.gguf_writer.add_file_type(self.ftype)

        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "linear" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        head_count = self.hparams["num_attention_heads"]
        head_count_kv = self.hparams.get("num_key_value_heads", head_count)

        tensors: list[tuple[str, Tensor]] = []

        if bid is not None and name == f"model.layers.{bid}.self_attn.W_pack.weight":
            logger.info(f"Unpacking and permuting layer {bid}")
            tensors = [
                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid),
                 self._reverse_hf_permute_part(data_torch, 0, head_count, head_count)),
                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid),
                 self._reverse_hf_permute_part(data_torch, 1, head_count, head_count_kv)),
                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid),
                 self._reverse_hf_part(data_torch, 2)),
            ]
        else:
            tensors = [(self.map_tensor_name(name), data_torch)]

        return tensors

    def _reverse_hf_permute(self, weights: Tensor, n_head: int, n_kv_head: int | None = None) -> Tensor:
        if n_kv_head is not None and n_head != n_kv_head:
            n_head //= n_kv_head

        return (
            weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
            .swapaxes(1, 2)
            .reshape(weights.shape)
        )

    def _reverse_hf_permute_part(
        self, weights: Tensor, n_part: int, n_head: int, n_head_kv: int | None = None,
    ) -> Tensor:
        r = weights.shape[0] // 3
        return self._reverse_hf_permute(weights[r * n_part:r * n_part + r, ...], n_head, n_head_kv)

    def _reverse_hf_part(self, weights: Tensor, n_part: int) -> Tensor:
        r = weights.shape[0] // 3
        return weights[r * n_part:r * n_part + r, ...]
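
# W_pack layout assumed by the helpers above: a single (3 * hidden, hidden) matrix
# with Q, K and V stacked along dim 0, so r = shape[0] // 3 selects one third per
# component before the usual rotary un-permute is applied to Q and K.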


@ModelBase.register("XverseForCausalLM")
class XverseModel(TextModel):
    model_arch = gguf.MODEL_ARCH.XVERSE

    def set_vocab(self):
        assert (self.dir_model / "tokenizer.json").is_file()
        dir_model = self.dir_model
        hparams = self.hparams

        tokens: list[bytes] = []
        toktypes: list[int] = []

        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(dir_model)
        vocab_size = hparams.get("vocab_size", len(tokenizer.vocab))
        # Since we are checking the maximum index, we need to ensure it's strictly less than vocab_size,
        # because vocab_size is the count of items, and indexes start at 0.
        max_vocab_index = max(tokenizer.get_vocab().values())
        if max_vocab_index >= vocab_size:
            raise ValueError("Vocabulary size exceeds expected maximum size.")

        reverse_vocab: dict[int, str] = {id_: encoded_tok for encoded_tok, id_ in tokenizer.vocab.items()}
        added_vocab = tokenizer.get_added_vocab()

        for token_id in range(vocab_size):
            token_text = reverse_vocab[token_id].encode('utf-8')
            # replace "\x00" with a string of length > 0
            if token_text == b"\x00":
                toktype = gguf.TokenType.BYTE  # special
                token_text = f"<{token_text}>".encode('utf-8')
            elif re.fullmatch(br"<0x[0-9A-Fa-f]{2}>", token_text):
                toktype = gguf.TokenType.BYTE  # special
            elif reverse_vocab[token_id] in added_vocab:
                if tokenizer.added_tokens_decoder[token_id].special:
                    toktype = gguf.TokenType.CONTROL
                else:
                    toktype = gguf.TokenType.USER_DEFINED
            else:
                toktype = gguf.TokenType.NORMAL

            tokens.append(token_text)
            toktypes.append(toktype)

        self.gguf_writer.add_tokenizer_model("llama")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        block_count = self.hparams["num_hidden_layers"]
        head_count = self.hparams["num_attention_heads"]
        head_count_kv = self.hparams.get("num_key_value_heads", head_count)

        ctx_length = 0
        if "max_sequence_length" in self.hparams:
            ctx_length = self.hparams["max_sequence_length"]
        elif "max_position_embeddings" in self.hparams:
            ctx_length = self.hparams["max_position_embeddings"]
        elif "model_max_length" in self.hparams:
            ctx_length = self.hparams["model_max_length"]
        else:
            raise ValueError("gguf: cannot find ctx length parameter.")

        self.gguf_writer.add_tensor_data_layout("Meta AI original pth")
        self.gguf_writer.add_context_length(ctx_length)
        self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
        self.gguf_writer.add_rope_dimension_count(self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
        self.gguf_writer.add_head_count(head_count)
        self.gguf_writer.add_head_count_kv(head_count_kv)
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
        self.gguf_writer.add_file_type(self.ftype)

        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "linear" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        head_count = self.hparams["num_attention_heads"]
        head_count_kv = self.hparams.get("num_key_value_heads", head_count)

        # HF models permute some of the tensors, so we need to undo that
        if name.endswith("q_proj.weight"):
            data_torch = self._reverse_hf_permute(data_torch, head_count, head_count)
        if name.endswith("k_proj.weight"):
            data_torch = self._reverse_hf_permute(data_torch, head_count, head_count_kv)

        return [(self.map_tensor_name(name), data_torch)]

    def _reverse_hf_permute(self, weights: Tensor, n_head: int, n_kv_head: int | None = None) -> Tensor:
        if n_kv_head is not None and n_head != n_kv_head:
            n_head //= n_kv_head

        return (
            weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
            .swapaxes(1, 2)
            .reshape(weights.shape)
        )


@ModelBase.register("FalconForCausalLM", "RWForCausalLM")
class FalconModel(TextModel):
    model_arch = gguf.MODEL_ARCH.FALCON

    def set_gguf_parameters(self):
        block_count = self.hparams.get("num_hidden_layers")
        if block_count is None:
            block_count = self.hparams["n_layer"]  # old name

        n_head = self.hparams.get("num_attention_heads")
        if n_head is None:
            n_head = self.hparams["n_head"]  # old name

        n_head_kv = self.hparams.get("num_kv_heads")
        if n_head_kv is None:
            n_head_kv = self.hparams.get("n_head_kv", 1)  # old name

        self.gguf_writer.add_context_length(2048)  # not in config.json
        self.gguf_writer.add_tensor_data_layout("jploski")  # qkv tensor transform
        self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
        self.gguf_writer.add_feed_forward_length(4 * self.hparams["hidden_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_head_count(n_head)
        self.gguf_writer.add_head_count_kv(n_head_kv)
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        # QKV tensor transform
        # The original query_key_value tensor contains n_head_kv "kv groups",
        # each consisting of n_head/n_head_kv query weights followed by one key
        # and one value weight (shared by all query heads in the kv group).
        # This layout makes it a big pain to work with in GGML.
        # So we rearrange them here, so that we have n_head query weights
        # followed by n_head_kv key weights followed by n_head_kv value weights,
        # in contiguous fashion.
        # ref: https://github.com/jploski/ggml/blob/falcon40b/examples/falcon/convert-hf-to-ggml.py
        if "query_key_value" in name:
            n_head = self.find_hparam(["num_attention_heads", "n_head"])
            n_head_kv = self.find_hparam(["num_kv_heads", "n_head_kv"], optional=True) or 1
            head_dim = self.hparams["hidden_size"] // n_head

            qkv = data_torch.view(n_head_kv, n_head // n_head_kv + 2, head_dim, head_dim * n_head)
            q = qkv[:, :-2].reshape(n_head * head_dim, head_dim * n_head)
            k = qkv[:, [-2]].reshape(n_head_kv * head_dim, head_dim * n_head)
            v = qkv[:, [-1]].reshape(n_head_kv * head_dim, head_dim * n_head)
            data_torch = torch.cat((q, k, v)).reshape_as(data_torch)

        return [(self.map_tensor_name(name), data_torch)]
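
# Worked example of the Falcon qkv rearrangement (illustrative): with n_head = 4
# and n_head_kv = 2 the fused rows come grouped as [q q k v | q q k v]; the
# view/slice above regroups them into [q q q q | k k | v v], i.e. all queries,
# then all keys, then all values, which is the contiguous layout GGML expects.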


@ModelBase.register("GPTBigCodeForCausalLM")
class StarCoderModel(TextModel):
    model_arch = gguf.MODEL_ARCH.STARCODER

    def set_gguf_parameters(self):
        block_count = self.hparams["n_layer"]

        self.gguf_writer.add_context_length(self.hparams["n_positions"])
        self.gguf_writer.add_embedding_length(self.hparams["n_embd"])
        self.gguf_writer.add_feed_forward_length(4 * self.hparams["n_embd"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_head_count(self.hparams["n_head"])
        self.gguf_writer.add_head_count_kv(1)
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)


@ModelBase.register("GPTRefactForCausalLM")
class RefactModel(TextModel):
    model_arch = gguf.MODEL_ARCH.REFACT

    def set_vocab(self):
        super().set_vocab()

        # TODO: how to determine special FIM tokens automatically?
        special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=False,
                                          special_token_types = ['prefix', 'suffix', 'middle', 'eot'])
        special_vocab._set_special_token("prefix", 1)
        special_vocab._set_special_token("suffix", 3)
        special_vocab._set_special_token("middle", 2)
        special_vocab.chat_template = None  # do not add it twice
        special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        hidden_dim = self.hparams["n_embd"]
        inner_dim = 4 * hidden_dim
        hidden_dim = int(2 * inner_dim / 3)
        multiple_of = 256
        ff_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of)
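        # e.g. (illustrative) n_embd = 4096: inner_dim = 16384,
        # hidden_dim = int(2 * 16384 / 3) = 10922, and rounding up to a multiple
        # of 256 gives ff_dim = 11008 -- the LLaMA-style SwiGLU sizing rule.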
        block_count = self.hparams["n_layer"]

        # Refact uses ALiBi, so this context length comes from config.json and may reflect the training setting.
        self.gguf_writer.add_context_length(self.hparams["n_positions"])
        self.gguf_writer.add_embedding_length(self.hparams["n_embd"])
        self.gguf_writer.add_feed_forward_length(ff_dim)
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_head_count(self.hparams["n_head"])
        self.gguf_writer.add_head_count_kv(1)
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        hidden_dim = self.hparams["n_embd"]
        inner_dim = 4 * hidden_dim
        hidden_dim = int(2 * inner_dim / 3)
        multiple_of = 256
        ff_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of)
        n_head = self.hparams["n_head"]
        n_head_kv = 1
        head_dim = self.hparams["n_embd"] // n_head

        tensors: list[tuple[str, Tensor]] = []

        if bid is not None:
            if name == f"transformer.h.{bid}.attn.kv.weight":
                tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid), data_torch[:n_head_kv * head_dim]))
                tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid), data_torch[n_head_kv * head_dim:]))
            elif name == f"transformer.h.{bid}.attn.q.weight":
                tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid), data_torch))
            elif name == f"transformer.h.{bid}.mlp.gate_up_proj.weight":
                tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE, bid), data_torch[:ff_dim]))
                tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP, bid), data_torch[ff_dim:]))

        if len(tensors) == 0:
            tensors.append((self.map_tensor_name(name), data_torch))

        return tensors


@ModelBase.register("StableLmForCausalLM", "StableLMEpochForCausalLM", "LlavaStableLMEpochForCausalLM")
class StableLMModel(TextModel):
    model_arch = gguf.MODEL_ARCH.STABLELM

    def set_vocab(self):
        if (self.dir_model / "tokenizer.json").is_file():
            self._set_vocab_gpt2()
        else:
            # StableLM 2 1.6B used to have a vocab in a similar format to Qwen's vocab
            self._set_vocab_qwen()

    def set_gguf_parameters(self):
        hparams = self.hparams
        block_count = hparams["num_hidden_layers"]

        self.gguf_writer.add_context_length(hparams["max_position_embeddings"])
        self.gguf_writer.add_embedding_length(hparams["hidden_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
        rotary_factor = self.find_hparam(["partial_rotary_factor", "rope_pct"])
        self.gguf_writer.add_rope_dimension_count(int(rotary_factor * (hparams["hidden_size"] // hparams["num_attention_heads"])))
        self.gguf_writer.add_head_count(hparams["num_attention_heads"])
        self.gguf_writer.add_head_count_kv(hparams["num_key_value_heads"])
        self.gguf_writer.add_parallel_residual(hparams["use_parallel_residual"] if "use_parallel_residual" in hparams else True)
        self.gguf_writer.add_layer_norm_eps(self.find_hparam(["layer_norm_eps", "norm_eps"]))
        self.gguf_writer.add_file_type(self.ftype)

    _q_norms: list[dict[str, Tensor]] | None = None
    _k_norms: list[dict[str, Tensor]] | None = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        n_head = self.hparams["num_attention_heads"]
        n_kv_head = self.hparams["num_key_value_heads"]

        if name.find("q_layernorm.norms") != -1:
            assert bid is not None

            if self._q_norms is None:
                self._q_norms = [{} for _ in range(self.block_count)]

            self._q_norms[bid][name] = data_torch

            if len(self._q_norms[bid]) >= n_head:
                return self._stack_qk_norm(bid, n_head, self._q_norms[bid], "q_layernorm")
            else:
                return []

        if name.find("k_layernorm.norms") != -1:
            assert bid is not None

            if self._k_norms is None:
                self._k_norms = [{} for _ in range(self.block_count)]

            self._k_norms[bid][name] = data_torch

            if len(self._k_norms[bid]) >= n_kv_head:
                return self._stack_qk_norm(bid, n_kv_head, self._k_norms[bid], "k_layernorm")
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]

    def _stack_qk_norm(self, bid: int, n_head: int, norms: dict[str, Tensor], layer_name: str = "q_layernorm"):
        datas: list[Tensor] = []
        # extract the norms in order
        for xid in range(n_head):
            ename = f"model.layers.{bid}.self_attn.{layer_name}.norms.{xid}.weight"
            datas.append(norms[ename])
            del norms[ename]
        data_torch = torch.stack(datas, dim=0)

        merged_name = f"model.layers.{bid}.self_attn.{layer_name}.weight"
        new_name = self.map_tensor_name(merged_name)

        return [(new_name, data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._q_norms is not None or self._k_norms is not None:
            # flatten two `list[dict[str, Tensor]]` into a single `list[str]`
            norms = (
                [k for d in self._q_norms for k in d.keys()] if self._q_norms is not None else []
            ) + (
                [k for d in self._k_norms for k in d.keys()] if self._k_norms is not None else []
            )
            if len(norms) > 0:
                raise ValueError(f"Unprocessed norms: {norms}")


@ModelBase.register(
    "LLaMAForCausalLM",
    "LlamaForCausalLM",
    "MistralForCausalLM",
    "MixtralForCausalLM",
    "VLlama3ForCausalLM",
    "LlavaForConditionalGeneration",
    "LlamaModel")
class LlamaModel(TextModel):
    model_arch = gguf.MODEL_ARCH.LLAMA
    undo_permute = True

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # fix for SmolVLM2, missing `num_attention_heads` in config.json
        if self.hf_arch == "VLlama3ForCausalLM":
            self.hparams["num_attention_heads"] = self.hparams.get("num_attention_heads", 32)

    def set_vocab(self):
        try:
            self._set_vocab_sentencepiece()
        except FileNotFoundError:
            try:
                self._set_vocab_llama_hf()
            except (FileNotFoundError, TypeError):
                # Llama 3
                self._set_vocab_gpt2()

        # Apply to CodeLlama only (and ignore for Llama 3 with a vocab size of 128256)
        if self.hparams.get("vocab_size", 32000) == 32016:
            special_vocab = gguf.SpecialVocab(
                self.dir_model, load_merges=False,
                special_token_types = ['prefix', 'suffix', 'middle', 'eot']
            )
            special_vocab._set_special_token("prefix", 32007)
            special_vocab._set_special_token("suffix", 32008)
            special_vocab._set_special_token("middle", 32009)
            special_vocab._set_special_token("eot", 32010)
            special_vocab.add_to_gguf(self.gguf_writer)

        tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
        if tokenizer_config_file.is_file():
            with open(tokenizer_config_file, "r", encoding="utf-8") as f:
                tokenizer_config_json = json.load(f)
                if "add_prefix_space" in tokenizer_config_json:
                    self.gguf_writer.add_add_space_prefix(tokenizer_config_json["add_prefix_space"])

        # Apply to granite small models only
        if self.hparams.get("vocab_size", 32000) == 49152:
            self.gguf_writer.add_add_bos_token(False)

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        hparams = self.hparams
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])

        if "head_dim" in hparams:
            rope_dim = hparams["head_dim"]
        else:
            rope_dim = hparams["hidden_size"] // hparams["num_attention_heads"]
        self.gguf_writer.add_rope_dimension_count(rope_dim)

        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "linear" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])

    @staticmethod
    def permute(weights: Tensor, n_head: int, n_head_kv: int | None):
        if n_head_kv is not None and n_head != n_head_kv:
            n_head = n_head_kv
        return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
                .swapaxes(1, 2)
                .reshape(weights.shape))
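
    # Shape sketch for permute() (illustrative): a (4096, 4096) Q weight with
    # n_head = 32 is viewed as (32, 2, 64, 4096), the two middle axes are swapped
    # to (32, 64, 2, 4096), and flattening back re-interleaves the two halves of
    # each head's rows -- undoing the half-split layout the HF checkpoint uses
    # for rotary embeddings.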
  1591. _experts: list[dict[str, Tensor]] | None = None
  1592. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  1593. n_head = self.hparams["num_attention_heads"]
  1594. n_kv_head = self.hparams.get("num_key_value_heads")
  1595. is_vision_tensor = "vision_tower" in name \
  1596. or "vision_model" in name \
  1597. or "model.connector" in name \
  1598. or "multi_modal_projector" in name
  1599. if is_vision_tensor:
  1600. return [] # skip vision tensors
  1601. elif self.hf_arch == "LlamaModel":
  1602. name = "model." + name
  1603. elif name.startswith("model.text_model"):
  1604. name = name.replace("text_model.", "") # for SmolVLM
  1605. elif name.startswith("language_model."):
  1606. name = name.replace("language_model.", "") # for the rest
  1607. if self.undo_permute:
  1608. if name.endswith(("q_proj.weight", "q_proj.bias")):
  1609. data_torch = LlamaModel.permute(data_torch, n_head, n_head)
  1610. if name.endswith(("k_proj.weight", "k_proj.bias")):
  1611. data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)
        # process the experts separately
        if name.find("block_sparse_moe.experts") != -1:
            n_experts = self.hparams["num_local_experts"]
            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for wid in ["w1", "w2", "w3"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.{wid}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)

                    merged_name = f"layers.{bid}.feed_forward.experts.{wid}.weight"

                    new_name = self.map_tensor_name(merged_name)

                    tensors.append((new_name, data_torch))
                return tensors
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]

    def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
        if rope_scaling := self.find_hparam(["rope_scaling"], optional=True):
            if rope_scaling.get("rope_type", '').lower() == "llama3":
                base = self.hparams.get("rope_theta", 10000.0)
                dim = self.hparams.get("head_dim", self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
                freqs = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim))

                factor = rope_scaling.get("factor", 8.0)
                low_freq_factor = rope_scaling.get("low_freq_factor", 1.0)
                high_freq_factor = rope_scaling.get("high_freq_factor", 4.0)
                old_context_len = self.hparams.get("original_max_position_embeddings", 8192)

                low_freq_wavelen = old_context_len / low_freq_factor
                high_freq_wavelen = old_context_len / high_freq_factor
                # assert low_freq_wavelen != high_freq_wavelen  # Errors for Llama4
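
                # Piecewise llama3 scaling over the per-dimension wavelengths:
                #   wavelen < high_freq_wavelen -> 1       (high-frequency dims, no scaling)
                #   wavelen > low_freq_wavelen  -> factor  (low-frequency dims, full scaling)
                #   otherwise                   -> smooth interpolation between the two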
                rope_factors = []
                for freq in freqs:
                    wavelen = 2 * math.pi / freq
                    if wavelen < high_freq_wavelen:
                        rope_factors.append(1)
                    elif wavelen > low_freq_wavelen:
                        rope_factors.append(factor)
                    else:
                        smooth = (old_context_len / wavelen - low_freq_factor) / (high_freq_factor - low_freq_factor)
                        rope_factors.append(1 / ((1 - smooth) / factor + smooth))

                yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FREQS), torch.tensor(rope_factors, dtype=torch.float32))

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")


@ModelBase.register("ArceeForCausalLM")
class ArceeModel(LlamaModel):
    model_arch = gguf.MODEL_ARCH.ARCEE

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self._try_set_pooling_type()
        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
            self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"])


@ModelBase.register(
    "LlavaForConditionalGeneration",     # pixtral
    "Mistral3ForConditionalGeneration",  # mistral small 3.1
)
class LlavaVisionModel(MmprojModel):
    img_break_tok_id = -1

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.hparams["model_type"] == "pixtral":
            # layer_norm_eps is not in config.json, it is hard-coded in modeling_pixtral.py
            self.hparams["layer_norm_eps"] = self.hparams.get("layer_norm_eps", 1e-5)
            self.img_break_tok_id = self.get_token_id("[IMG_BREAK]")
            logger.info(f"Image break token id: {self.img_break_tok_id}")
        else:
            raise ValueError(f"Unsupported model type: {self.hparams['model_type']}")

    def get_token_id(self, token: str) -> int:
        tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
        with open(tokenizer_config_file, "r", encoding="utf-8") as f:
            added_tokens_decoder = json.load(f)['added_tokens_decoder']
            for id_, token_data in added_tokens_decoder.items():
                if token_data["content"] == token:
                    return int(id_)
        raise ValueError(f"Token '{token}' not found in tokenizer config.")
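
    # get_token_id() relies on the "added_tokens_decoder" section of
    # tokenizer_config.json, which typically looks like (illustrative id only):
    #   "added_tokens_decoder": {
    #       "14": {"content": "[IMG_BREAK]", "special": true, ...}
    #   }
    # so the reverse lookup above maps "[IMG_BREAK]" -> 14.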

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        hparams = self.hparams
        if hparams["model_type"] == "pixtral":
            self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.PIXTRAL)
            self.gguf_writer.add_vision_attention_layernorm_eps(hparams["layer_norm_eps"])

            # hidden_act
            if hparams["hidden_act"] == "silu":
                self.gguf_writer.add_vision_use_silu(True)
            elif hparams["hidden_act"] == "gelu":
                self.gguf_writer.add_vision_use_gelu(True)
            else:
                raise ValueError(f"Unsupported hidden_act: {hparams['hidden_act']}")

            # spatial_merge_size
            if "spatial_merge_size" in self.global_config:
                self.gguf_writer.add_vision_spatial_merge_size(self.global_config["spatial_merge_size"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused
        n_head = self.hparams["num_attention_heads"]
        n_kv_head = n_head

        if name.startswith("multi_modal_projector.") or name.startswith("vision_tower."):
            # process vision tensors
            if name.endswith(("q_proj.weight", "q_proj.bias")):
                data_torch = LlamaModel.permute(data_torch, n_head, n_head)
            if name.endswith(("k_proj.weight", "k_proj.bias")):
                data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)
            return [(self.map_tensor_name(name), data_torch)]

        if self.img_break_tok_id > 0 and "embed_tokens.weight" in name:
            logger.info(f"Extracting [IMG_BREAK] token embedding from {name}")
            # for pixtral model, we need to extract the [IMG_BREAK] token embedding
            img_break_embd = data_torch[self.img_break_tok_id]
            name = gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.V_TOK_EMBD_IMG_BREAK]
            return [(self.map_tensor_name(name), img_break_embd)]

        return []  # skip other tensors


@ModelBase.register("Idefics3ForConditionalGeneration", "SmolVLMForConditionalGeneration")
class SmolVLMModel(MmprojModel):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.hparams["model_type"] == "smolvlm_vision":
            # fix for SmolVLM2, missing some keys in config.json
            # default values are taken from transformers code
            self.hparams["hidden_size"] = self.hparams.get("hidden_size", 1152)
            self.hparams["num_attention_heads"] = self.hparams.get("num_attention_heads", 16)
            self.hparams["intermediate_size"] = self.hparams.get("intermediate_size", 3072)

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.IDEFICS3)
        self.gguf_writer.add_vision_attention_layernorm_eps(self.hparams.get("layer_norm_eps", 1e-5))
        self.gguf_writer.add_vision_projector_scale_factor(self.global_config.get("scale_factor", 2))
        self.gguf_writer.add_vision_use_gelu(True)

    def tensor_force_quant(self, name, new_name, bid, n_dims):
        del bid, new_name, n_dims  # unused
        if ".embeddings." in name:
            return gguf.GGMLQuantizationType.F32
        return False

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused
        is_vision_tensor = "vision_tower" in name or "vision_model" in name or "model.connector" in name

        if is_vision_tensor:
            return [(self.map_tensor_name(name), data_torch)]

        return []  # skip other tensors


@ModelBase.register("Llama4ForConditionalGeneration")
class Llama4Model(LlamaModel):
    model_arch = gguf.MODEL_ARCH.LLAMA4
    undo_permute = False

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # IMPORTANT: the normal "intermediate_size" is renamed to "intermediate_size_mlp", we need to undo this
        self.hparams["intermediate_size_moe"] = self.hparams["intermediate_size"]
        self.hparams["intermediate_size"] = self.hparams["intermediate_size_mlp"]

    def set_vocab(self):
        self._set_vocab_gpt2()
        self.gguf_writer.add_add_bos_token(True)

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_interleave_moe_layer_step(self.hparams["interleave_moe_layer_step"])
        self.gguf_writer.add_expert_feed_forward_length(self.hparams["intermediate_size_moe"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None):
        if name.startswith("language_model."):
            name = name.replace("language_model.", "")

        # split the gate_up into gate and up
        if "gate_up_proj" in name:
            name_up = name.replace("gate_up_proj", "up_proj.weight")
            name_gate = name.replace("gate_up_proj", "gate_proj.weight")
            dim_half = data_torch.shape[-1] // 2
            gate_proj_weight, up_proj_weight = data_torch.transpose(-1, -2).split(dim_half, dim=-2)
            return [
                (self.map_tensor_name(name_gate), gate_proj_weight),
                (self.map_tensor_name(name_up), up_proj_weight)
            ]

        if name.endswith("down_proj"):
            name += ".weight"
            data_torch = data_torch.transpose(-1, -2)

        if "multi_modal_projector" in name or "vision_model" in name:
            return []
        return super().modify_tensors(data_torch, name, bid)


@ModelBase.register("Llama4ForConditionalGeneration")
class Llama4VisionModel(MmprojModel):
    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.LLAMA4)
        self.gguf_writer.add_vision_attention_layernorm_eps(self.hparams["norm_eps"])
        self.gguf_writer.add_vision_projector_scale_factor(int(1.0 / self.hparams["pixel_shuffle_ratio"]))
        assert self.hparams["hidden_act"] == "gelu"
        self.gguf_writer.add_vision_use_gelu(True)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused
        if "multi_modal_projector" in name or "vision_model" in name:
            # process vision tensors
            if "positional_embedding_vlm" in name and ".weight" not in name:
                name += ".weight"
            if "multi_modal_projector.linear_1" in name:
                # despite the name with number postfix, this is a single fully connected layer
                return [(gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.V_MMPROJ_FC], data_torch)]
            return [(self.map_tensor_name(name), data_torch)]
        return []


@ModelBase.register("Mistral3ForConditionalGeneration")
class Mistral3Model(LlamaModel):
    model_arch = gguf.MODEL_ARCH.LLAMA

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None):
        name = name.replace("language_model.", "")
        if "multi_modal_projector" in name or "vision_tower" in name:
            return []
        return super().modify_tensors(data_torch, name, bid)


@ModelBase.register("DeciLMForCausalLM")
class DeciModel(TextModel):
    model_arch = gguf.MODEL_ARCH.DECI

    @staticmethod
    def _ffn_mult_to_intermediate_size(ffn_mult: float, n_embd: int) -> int:
        # DeciLM-specific code
        intermediate_size = int(2 * ffn_mult * n_embd / 3)
        return DeciModel._find_multiple(intermediate_size, 256)

    @staticmethod
    def _find_multiple(n: int, k: int) -> int:
        # DeciLM-specific code
        if n % k == 0:
            return n
        return n + k - (n % k)
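
    # Worked example of the two helpers above: with ffn_mult = 1.3 and
    # n_embd = 4096 (illustrative values), int(2 * 1.3 * 4096 / 3) = 3549,
    # which _find_multiple rounds up to the next multiple of 256, i.e. 3584.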

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if "block_configs" in self.hparams:  # Llama-3_1-Nemotron-51B
            _block_configs: list[dict[str, Any]] = self.hparams["block_configs"]
            assert self.block_count == len(_block_configs)
            self._num_kv_heads = list()
            self._num_heads = list()
            _ffn_multipliers = list()
            # ***linear attention layer***
            # if n_heads_in_group is None and replace_with_linear is True
            # then _num_kv_heads[il] is 0 and _num_heads[il] is num_attention_heads
            # ***attention-free layer***
            # if n_heads_in_group is None and replace_with_linear is False
            # then _num_kv_heads[il] is 0 and _num_heads[il] is 0
            # ***normal attention-layer***
            # if n_heads_in_group is not None, then
            # _num_kv_heads[il] is num_attention_head // n_heads_in_group and
            # _num_heads[il] is num_attention_head
            # ***dummy layer*** for nemotron 253B
            # if n_heads_in_group is None and ffn_mult is None
            # then _num_kv_heads[il] is 0 and _num_heads[il] is 0 and _ffn_dims is 0
            for il in range(len(_block_configs)):
                if _block_configs[il]["attention"]["n_heads_in_group"] is None:
                    if _block_configs[il]["attention"]["replace_with_linear"] is True:
                        self._num_kv_heads.append(0)
                        self._num_heads.append(self.hparams["num_attention_heads"])
                    else:
                        self._num_kv_heads.append(0)
                        self._num_heads.append(0)
                else:
                    self._num_kv_heads.append(self.hparams["num_attention_heads"] // _block_configs[il]["attention"]["n_heads_in_group"])
                    self._num_heads.append(self.hparams["num_attention_heads"])
                if _block_configs[il]["ffn"]["ffn_mult"] is None:  # dummy layer
                    _ffn_multipliers.append(0.0)
                else:
                    _ffn_multipliers.append(_block_configs[il]["ffn"]["ffn_mult"])
            assert self.block_count == len(self._num_kv_heads)
            assert self.block_count == len(self._num_heads)
            assert self.block_count == len(_ffn_multipliers)
            assert isinstance(self._num_kv_heads, list) and isinstance(self._num_kv_heads[0], int)
            assert isinstance(self._num_heads, list) and isinstance(self._num_heads[0], int)
            assert isinstance(_ffn_multipliers, list) and isinstance(_ffn_multipliers[0], float)
            self._ffn_dims: list[int] = [
                DeciModel._ffn_mult_to_intermediate_size(multiplier, self.hparams["hidden_size"])
                for multiplier in _ffn_multipliers
            ]

    def set_vocab(self):
        # Please change tokenizer_config.json of Llama-3_1-Nemotron-51B's
        # eos_token from '<|eot_id|>' to '<|end_of_text|>'
        if self.hparams.get("vocab_size", 128256) == 128256:
            tokens, toktypes, tokpre = self.get_vocab_base()
            self.gguf_writer.add_tokenizer_model("gpt2")
            self.gguf_writer.add_tokenizer_pre(tokpre)
            self.gguf_writer.add_token_list(tokens)
            self.gguf_writer.add_token_types(toktypes)

            special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=True)
            special_vocab.add_to_gguf(self.gguf_writer)
        else:
            # DeciLM-7B
            self._set_vocab_llama_hf()

    def set_gguf_parameters(self):
        if "block_configs" in self.hparams:  # Llama-3_1-Nemotron-51B
            assert self.block_count == len(self._num_kv_heads)
            assert self.block_count == len(self._num_heads)
            assert self.block_count == len(self._ffn_dims)
            if (rope_theta := self.hparams.get("rope_theta")) is not None:
                self.gguf_writer.add_rope_freq_base(rope_theta)
            self.gguf_writer.add_head_count_kv(self._num_kv_heads)
            self.gguf_writer.add_head_count(self._num_heads)
            self.gguf_writer.add_feed_forward_length(self._ffn_dims)
            self.gguf_writer.add_block_count(self.block_count)
            self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"])
            self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
            self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
            self.gguf_writer.add_key_length(self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
            self.gguf_writer.add_value_length(self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
            self.gguf_writer.add_file_type(self.ftype)
        else:  # DeciLM-7B
            super().set_gguf_parameters()
            if "num_key_value_heads_per_layer" in self.hparams:  # DeciLM-7B
                self._num_kv_heads: list[int] = self.hparams["num_key_value_heads_per_layer"]
                assert self.block_count == len(self._num_kv_heads)
                self.gguf_writer.add_head_count_kv(self._num_kv_heads)
        hparams = self.hparams
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])

        if "head_dim" in hparams:
            rope_dim = hparams["head_dim"]
        else:
            rope_dim = hparams["hidden_size"] // hparams["num_attention_heads"]
        self.gguf_writer.add_rope_dimension_count(rope_dim)

        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "linear" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])

    @staticmethod
    def permute(weights: Tensor, n_head: int, n_head_kv: int | None):
        if n_head_kv is not None and n_head != n_head_kv:
            n_head = n_head_kv
        return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
                .swapaxes(1, 2)
                .reshape(weights.shape))

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        n_head = self.hparams["num_attention_heads"]
        if bid is not None:
            if "num_key_value_heads_per_layer" in self.hparams:
                n_kv_head = self.hparams["num_key_value_heads_per_layer"][bid]
            elif "block_configs" in self.hparams:
                n_kv_head = self._num_kv_heads[bid]
                n_head = self._num_heads[bid]
            else:
                n_kv_head = self.hparams.get("num_key_value_heads")
        else:
            n_kv_head = self.hparams.get("num_key_value_heads")
        if name.endswith(("q_proj.weight", "q_proj.bias")):
            data_torch = DeciModel.permute(data_torch, n_head, n_head)
        if name.endswith(("k_proj.weight", "k_proj.bias")):
            data_torch = DeciModel.permute(data_torch, n_head, n_kv_head)
        return [(self.map_tensor_name(name), data_torch)]

    def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
        if rope_scaling := self.find_hparam(["rope_scaling"], optional=True):
            if rope_scaling.get("rope_type", '').lower() == "llama3":
                base = self.hparams.get("rope_theta", 10000.0)
                dim = self.hparams.get("head_dim", self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
                freqs = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim))

                factor = rope_scaling.get("factor", 8.0)
                low_freq_factor = rope_scaling.get("low_freq_factor", 1.0)
                high_freq_factor = rope_scaling.get("high_freq_factor", 4.0)
                old_context_len = self.hparams.get("original_max_position_embeddings", 8192)

                low_freq_wavelen = old_context_len / low_freq_factor
                high_freq_wavelen = old_context_len / high_freq_factor
                assert low_freq_wavelen != high_freq_wavelen

                rope_factors = []
                for freq in freqs:
                    wavelen = 2 * math.pi / freq
                    if wavelen < high_freq_wavelen:
                        rope_factors.append(1)
                    elif wavelen > low_freq_wavelen:
                        rope_factors.append(factor)
                    else:
                        smooth = (old_context_len / wavelen - low_freq_factor) / (high_freq_factor - low_freq_factor)
                        rope_factors.append(1 / ((1 - smooth) / factor + smooth))

                yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FREQS), torch.tensor(rope_factors, dtype=torch.float32))

    def prepare_tensors(self):
        super().prepare_tensors()


@ModelBase.register("BitnetForCausalLM")
class BitnetModel(TextModel):
    model_arch = gguf.MODEL_ARCH.BITNET

    def set_vocab(self):
        self._set_vocab_sentencepiece()

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
        self.gguf_writer.add_rope_scaling_factor(1.0)

    def weight_quant(self, weight: Tensor) -> Tensor:
        dtype = weight.dtype
        weight = weight.float()
        scale = weight.abs().mean().clamp(min=1e-5)
        iscale = 1 / scale
        # TODO: multiply by the scale directly instead of inverting it twice
        # (this is also unnecessarily doubly inverted upstream)
        # ref: https://huggingface.co/1bitLLM/bitnet_b1_58-3B/blob/af89e318d78a70802061246bf037199d2fb97020/utils_quant.py#L10
        result = (weight * iscale).round().clamp(-1, 1) / iscale
        return result.type(dtype)
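
    # weight_quant() is a round-to-nearest ternary quantization: each weight is
    # divided by the mean absolute value of the tensor, rounded and clamped to
    # {-1, 0, +1}, then multiplied back by the scale. E.g. for weights
    # [0.4, -1.2, 0.05] (illustrative) the scale is 0.55, giving quantized
    # values [1, -1, 0] and dequantized values [0.55, -0.55, 0.0].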

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        new_name = self.map_tensor_name(name)

        if any(self.match_model_tensor_name(new_name, key, bid) for key in [
            gguf.MODEL_TENSOR.ATTN_Q,
            gguf.MODEL_TENSOR.ATTN_K,
            gguf.MODEL_TENSOR.ATTN_V,
            gguf.MODEL_TENSOR.ATTN_OUT,
            gguf.MODEL_TENSOR.FFN_UP,
            gguf.MODEL_TENSOR.FFN_DOWN,
            gguf.MODEL_TENSOR.FFN_GATE,
        ]):
            # transform weight into 1/0/-1 (in fp32)
            data_torch = self.weight_quant(data_torch)

        yield (new_name, data_torch)


@ModelBase.register("GrokForCausalLM")
class GrokModel(TextModel):
    model_arch = gguf.MODEL_ARCH.GROK

    def set_vocab(self):
        self._set_vocab_sentencepiece()

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def set_gguf_parameters(self):
        super().set_gguf_parameters()

    _experts: list[dict[str, Tensor]] | None = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # process the experts separately
        if name.find(".moe.") != -1:
            n_experts = self.hparams["num_local_experts"]
            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for wid in ["linear", "linear_1", "linear_v"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"transformer.decoder_layer.{bid}.moe.{xid}.{wid}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)

                    merged_name = f"transformer.decoder_layer.{bid}.moe.{wid}.weight"

                    new_name = self.map_tensor_name(merged_name)

                    tensors.append((new_name, data_torch))
                return tensors
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]


@ModelBase.register("DbrxForCausalLM")
class DbrxModel(TextModel):
    model_arch = gguf.MODEL_ARCH.DBRX

    def set_gguf_parameters(self):
        ffn_config = self.hparams["ffn_config"]
        attn_config = self.hparams["attn_config"]
        self.gguf_writer.add_block_count(self.hparams["n_layers"])

        self.gguf_writer.add_context_length(self.hparams["max_seq_len"])
        self.gguf_writer.add_embedding_length(self.hparams["d_model"])
        self.gguf_writer.add_feed_forward_length(ffn_config["ffn_hidden_size"])

        self.gguf_writer.add_head_count(self.hparams["n_heads"])
        self.gguf_writer.add_head_count_kv(attn_config["kv_n_heads"])

        self.gguf_writer.add_rope_freq_base(attn_config["rope_theta"])

        self.gguf_writer.add_clamp_kqv(attn_config["clip_qkv"])

        self.gguf_writer.add_expert_count(ffn_config["moe_num_experts"])
        self.gguf_writer.add_expert_used_count(ffn_config["moe_top_k"])

        self.gguf_writer.add_layer_norm_eps(1e-5)

        self.gguf_writer.add_file_type(self.ftype)
        logger.info(f"gguf: file type = {self.ftype}")

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        n_expert = self.hparams["ffn_config"]["moe_num_experts"]
        n_ff = self.hparams["ffn_config"]["ffn_hidden_size"]
        n_embd = self.hparams["d_model"]

        # Specific behavior for experts tensors: suffix .weight, view as 3D and transpose
        # original implementation expects (n_expert, n_ff, n_embd) for all experts weights
        # But llama.cpp moe graph works differently
        # AND the dimensions in ggml are typically in the reverse order of the pytorch dimensions
        # so (n_expert, n_ff, n_embd) in pytorch is {n_embd, n_ff, n_expert} in ggml_tensor
        exp_tensor_names = {"ffn.experts.mlp.w1": None,       # LLM_TENSOR_FFN_GATE_EXPS ggml_tensor->ne{n_embd, n_ff, n_expert}
                            "ffn.experts.mlp.w2": (0, 2, 1),  # LLM_TENSOR_FFN_DOWN_EXPS ggml_tensor->ne{n_ff, n_embd, n_expert}
                            "ffn.experts.mlp.v1": None}       # LLM_TENSOR_FFN_UP_EXPS   ggml_tensor->ne{n_embd, n_ff, n_expert}
        experts = False

        for exp_tensor_name in exp_tensor_names.keys():
            if name.find(exp_tensor_name) != -1 and name.find(".weight") == -1:
                experts = True
                data_torch = data_torch.view(n_expert, n_ff, n_embd)
                if (permute_tensor := exp_tensor_names[exp_tensor_name]) is not None:
                    data_torch = data_torch.permute(*permute_tensor)
                break

        # map tensor names
        # In MoE models the ffn tensors are typically most of the model weights,
        # and need to be quantizable. Quantize expects tensor names to be suffixed by .weight.
        # Every other model has weight names ending in .weight, so assume that
        # convention here; dbrx does not follow it:
        # https://huggingface.co/databricks/dbrx-instruct/blob/main/model.safetensors.index.json#L15
        new_name = self.map_tensor_name(name if not experts else name + ".weight", try_suffixes=(".weight",))

        return [(new_name, data_torch)]

    def tensor_force_quant(self, name: str, new_name: str, bid: int | None, n_dims: int) -> gguf.GGMLQuantizationType | bool:
        del name, new_name, bid  # unused
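
        # force-quantize every tensor with more than one dimension (the big
        # matmul weights); 1D tensors (norms, biases) are left to the default rules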
        return n_dims > 1


@ModelBase.register("MiniCPMForCausalLM")
class MiniCPMModel(TextModel):
    model_arch = gguf.MODEL_ARCH.MINICPM

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        embedding_scale = float(self.hparams["scale_emb"])
        self.gguf_writer.add_embedding_scale(embedding_scale)
        logger.info(f"gguf: (minicpm) embedding_scale = {embedding_scale}")
        residual_scale = self.hparams["scale_depth"] / self.hparams["num_hidden_layers"] ** 0.5
        self.gguf_writer.add_residual_scale(residual_scale)
        logger.info(f"gguf: (minicpm) residual_scale = {residual_scale}")
        logit_scale = self.hparams["hidden_size"] / self.hparams["dim_model_base"]
        self.gguf_writer.add_logit_scale(logit_scale)
        logger.info(f"gguf: (minicpm) logit_scale = {logit_scale}")
        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "longrope":
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LONGROPE)
            logger.info(f"gguf: (minicpm) rope_scaling_type = {gguf.RopeScalingType.LONGROPE}")

    def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
        rope_dims = self.hparams["hidden_size"] // self.hparams["num_attention_heads"]

        rope_scaling = self.find_hparam(['rope_scaling'], True)
        if rope_scaling is not None:
            long_factors = rope_scaling.get('long_factor', None)
            short_factors = rope_scaling.get('short_factor', None)

            if long_factors is None or short_factors is None:
                raise KeyError('Missing the required key rope_scaling.long_factor or rope_scaling.short_factor')
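
            # longrope provides one scaling factor per rotary frequency, and each
            # frequency covers two embedding dimensions, hence rope_dims / 2 entries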
            if len(long_factors) != len(short_factors) or len(long_factors) != rope_dims / 2:
                raise ValueError(f'The length of rope long and short factors must be {rope_dims / 2}')

            yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FACTORS_LONG), torch.tensor(long_factors, dtype=torch.float32))
            yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FACTORS_SHORT), torch.tensor(short_factors, dtype=torch.float32))

    def set_vocab(self):
        self._set_vocab_sentencepiece()

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        n_head = self.hparams["num_attention_heads"]
        n_kv_head = self.hparams.get("num_key_value_heads")

        # HF models permute some of the tensors, so we need to undo that
        if name.endswith("q_proj.weight"):
            data_torch = LlamaModel.permute(data_torch, n_head, n_head)
        if name.endswith("k_proj.weight"):
            data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)

        return [(self.map_tensor_name(name), data_torch)]


@ModelBase.register("MiniCPM3ForCausalLM")
class MiniCPM3Model(TextModel):
    model_arch = gguf.MODEL_ARCH.MINICPM3

    def set_gguf_parameters(self):
        hparams = self.hparams

        self.gguf_writer.add_file_type(self.ftype)
        self.gguf_writer.add_context_length(hparams["max_position_embeddings"])
        self.gguf_writer.add_embedding_length(hparams["hidden_size"])
        self.gguf_writer.add_block_count(self.block_count)
        self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
        self.gguf_writer.add_head_count(hparams["num_attention_heads"])
        self.gguf_writer.add_head_count_kv(hparams["num_key_value_heads"])
        self.gguf_writer.add_layer_norm_rms_eps(hparams["rms_norm_eps"])
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])
        if "q_lora_rank" in hparams and hparams["q_lora_rank"] is not None:
            self.gguf_writer.add_q_lora_rank(hparams["q_lora_rank"])
        self.gguf_writer.add_kv_lora_rank(hparams["kv_lora_rank"])
        self.gguf_writer.add_key_length(hparams["qk_nope_head_dim"] + hparams["qk_rope_head_dim"])
        self.gguf_writer.add_rope_dimension_count(hparams["qk_rope_head_dim"])

    def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
        rope_scaling = self.find_hparam(['rope_scaling'], True)
        if rope_scaling is not None:
            rope_dims = self.hparams["qk_rope_head_dim"]

            long_factors = rope_scaling.get('long_factor', None)
            short_factors = rope_scaling.get('short_factor', None)

            if long_factors is None or short_factors is None:
                raise KeyError('Missing the required key rope_scaling.long_factor or rope_scaling.short_factor')

            if len(long_factors) != len(short_factors) or len(long_factors) != rope_dims / 2:
                raise ValueError(f'The length of rope long and short factors must be {rope_dims / 2}')

            yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FACTORS_LONG), torch.tensor(long_factors, dtype=torch.float32))
            yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FACTORS_SHORT), torch.tensor(short_factors, dtype=torch.float32))

    def set_vocab(self):
        self._set_vocab_sentencepiece()

    def _reverse_hf_permute(self, weights: Tensor, n_head: int, n_kv_head: int | None = None) -> Tensor:
        if n_kv_head is not None and n_head != n_kv_head:
            n_head //= n_kv_head

        return (
            weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
            .swapaxes(1, 2)
            .reshape(weights.shape)
        )


@ModelBase.register("QWenLMHeadModel")
class QwenModel(TextModel):
    model_arch = gguf.MODEL_ARCH.QWEN

    @staticmethod
    def token_bytes_to_string(b):
        from transformers.models.gpt2.tokenization_gpt2 import bytes_to_unicode
        byte_encoder = bytes_to_unicode()
        return ''.join([byte_encoder[ord(char)] for char in b.decode('latin-1')])
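
    # token_bytes_to_string() maps raw token bytes through GPT-2's byte-to-unicode
    # table so that every byte becomes a printable character, e.g. b' ' -> 'Ġ'.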

    @staticmethod
    def bpe(mergeable_ranks: dict[bytes, int], token: bytes, max_rank: int | None = None) -> list[bytes]:
        parts = [bytes([b]) for b in token]
        while True:
            min_idx = None
            min_rank = None
            for i, pair in enumerate(zip(parts[:-1], parts[1:])):
                rank = mergeable_ranks.get(pair[0] + pair[1])
                if rank is not None and (min_rank is None or rank < min_rank):
                    min_idx = i
                    min_rank = rank
            if min_rank is None or (max_rank is not None and min_rank >= max_rank):
                break
            assert min_idx is not None
            parts = parts[:min_idx] + [parts[min_idx] + parts[min_idx + 1]] + parts[min_idx + 2:]
        return parts
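
    # bpe() replays the byte-pair merges for a single token: starting from
    # individual bytes, it repeatedly merges the adjacent pair with the lowest
    # rank until no merge (below max_rank) applies. E.g. with
    # mergeable_ranks = {b"ab": 0} (illustrative), bpe(mergeable_ranks, b"abc")
    # returns [b"ab", b"c"].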

    def set_vocab(self):
        self._set_vocab_qwen()

    def set_gguf_parameters(self):
        self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"])
        self.gguf_writer.add_block_count(self.hparams["num_hidden_layers"])
        self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
        self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
        self.gguf_writer.add_rope_freq_base(self.hparams["rotary_emb_base"])
        self.gguf_writer.add_rope_dimension_count(self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
        self.gguf_writer.add_head_count(self.hparams["num_attention_heads"])
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)


@ModelBase.register("Qwen2Model", "Qwen2ForCausalLM", "Qwen2AudioForConditionalGeneration")
class Qwen2Model(TextModel):
    model_arch = gguf.MODEL_ARCH.QWEN2

    def set_vocab(self):
        try:
            self._set_vocab_sentencepiece()
        except FileNotFoundError:
            self._set_vocab_gpt2()

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self._try_set_pooling_type()
        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
            self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        if self.hf_arch == "Qwen2Model":
            name = f"model.{name}"  # map to Qwen2ForCausalLM tensors
        if "language_model." in name:
            name = name.replace("language_model.", "")  # for InternVL
        if name.startswith("mlp") or name.startswith("multi_modal_projector") \
                or name.startswith("vision_model") or name.startswith("audio_tower"):
            # skip vision and audio tensors
            return []
        yield from super().modify_tensors(data_torch, name, bid)


@ModelBase.register(
    "Qwen2VLModel",
    "Qwen2VLForConditionalGeneration",
    "Qwen2_5_VLForConditionalGeneration",
    "Qwen2_5OmniModel",
)
class Qwen2VLModel(TextModel):
    model_arch = gguf.MODEL_ARCH.QWEN2VL

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        mrope_section = self.hparams["rope_scaling"]["mrope_section"]
        mrope_section += [0] * max(0, 4 - len(mrope_section))
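        # mrope splits the rope dimensions into per-axis sections, e.g.
        # [16, 24, 24] for temporal/height/width in the HF config (illustrative
        # values); the list is zero-padded above to the four sections used by mrope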
        self.gguf_writer.add_rope_dimension_sections(mrope_section)

    def set_vocab(self):
        try:
            self._set_vocab_sentencepiece()
        except FileNotFoundError:
            self._set_vocab_gpt2()

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused
        if name.startswith("thinker."):
            name = name.replace("thinker.", "")
        if name.startswith("visual") or name.startswith("audio") or \
                name.startswith("talker") or name.startswith("token2wav"):
            # skip multimodal tensors
            return []
        return [(self.map_tensor_name(name), data_torch)]


@ModelBase.register("Qwen2VLModel", "Qwen2VLForConditionalGeneration", "Qwen2_5_VLForConditionalGeneration")
class Qwen2VLVisionModel(MmprojModel):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        assert self.hparams_vision is not None
        self.hparams_vision["image_size"] = self.hparams_vision.get("image_size", 560)
        # rename config.json values
        self.hparams_vision["num_attention_heads"] = self.hparams_vision.get("num_heads")
        self.hparams_vision["num_hidden_layers"] = self.hparams_vision.get("depth")
        if "embed_dim" in self.hparams_vision:  # qwen2vl
            self.hparams_vision["intermediate_size"] = self.hparams_vision.get("hidden_size")
            self.hparams_vision["hidden_size"] = self.hparams_vision.get("embed_dim")

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        assert self.hparams_vision is not None
        hparams = self.hparams_vision
        model_type = self.global_config['model_type']
        if model_type == 'qwen2_vl':
            self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.QWEN2VL)
        elif model_type == 'qwen2_5_vl' or model_type == 'qwen2_5_omni':
            if model_type == 'qwen2_5_omni':
                self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.QWEN25O)
            else:
                self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.QWEN25VL)
            self.gguf_writer.add_vision_use_silu(True)
            # find n_wa_pattern (window attention pattern)
            fullatt_block_indexes = hparams.get("fullatt_block_indexes")
            assert fullatt_block_indexes is not None, "fullatt_block_indexes is required for qwen2_5_vl"
            n_wa_pattern = fullatt_block_indexes[0] + 1
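            # e.g. fullatt_block_indexes == [7, 15, 23, 31] (illustrative) gives
            # n_wa_pattern == 8: every 8th block uses full attention, the blocks
            # in between use window attention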
            # validate n_wa_pattern
            for i in range(1, len(fullatt_block_indexes)):
                if fullatt_block_indexes[i] - fullatt_block_indexes[i - 1] != n_wa_pattern:
                    raise ValueError(f"Invalid fullatt_block_indexes: {fullatt_block_indexes}")
            self.gguf_writer.add_vision_n_wa_pattern(n_wa_pattern)
        else:
            raise ValueError(f"Unknown QwenVL model type: {self.global_config['model_type']}")
        # default values below are taken from HF transformers code
        self.gguf_writer.add_vision_attention_layernorm_eps(self.global_config.get("rms_norm_eps", 1e-6))

    def tensor_force_quant(self, name, new_name, bid, n_dims):
        del bid, name, n_dims  # unused
        if ".patch_embd." in new_name:
            return gguf.GGMLQuantizationType.F16
        if ".position_embd." in new_name:
            return gguf.GGMLQuantizationType.F32
        return False

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused
        if name.startswith("visual."):
            # process visual tensors
            # split QKV tensors if needed
            if ".qkv." in name:
                if data_torch.ndim == 2:  # weight
                    c3, _ = data_torch.shape
                else:  # bias
                    c3 = data_torch.shape[0]
                assert c3 % 3 == 0
                c = c3 // 3
                wq = data_torch[:c]
                wk = data_torch[c: c * 2]
                wv = data_torch[c * 2:]
                return [
                    (self.map_tensor_name(name.replace("qkv", "q")), wq),
                    (self.map_tensor_name(name.replace("qkv", "k")), wk),
                    (self.map_tensor_name(name.replace("qkv", "v")), wv),
                ]
            elif 'patch_embed.proj.weight' in name:
                # split Conv3D into Conv2Ds
                c1, c2, kt, kh, kw = data_torch.shape
                del c1, c2, kh, kw  # unused
                assert kt == 2, "Current implementation only supports temporal_patch_size of 2"
                return [
                    (gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.V_ENC_EMBD_PATCH] + ".weight"  , data_torch[:, :, 0, ...]),
                    (gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.V_ENC_EMBD_PATCH] + ".weight.1", data_torch[:, :, 1, ...]),
                ]
            else:
                return [(self.map_tensor_name(name), data_torch)]

        return []  # skip other tensors


@ModelBase.register("Qwen2_5OmniModel")
class Qwen25OmniModel(Qwen2VLVisionModel):
    has_vision_encoder = True
    has_audio_encoder = True

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        assert self.hparams_audio is not None
        self.hparams_audio["hidden_size"] = self.hparams_audio["d_model"]
        self.hparams_audio["intermediate_size"] = self.hparams_audio["encoder_ffn_dim"]
        self.hparams_audio["num_attention_heads"] = self.hparams_audio["encoder_attention_heads"]

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        assert self.hparams_audio is not None
        self.gguf_writer.add_audio_num_mel_bins(self.hparams_audio["num_mel_bins"])
        self.gguf_writer.add_audio_attention_layernorm_eps(self.hparams_audio.get("layer_norm_eps", 1e-5))

    def get_vision_config(self) -> dict[str, Any] | None:
        return self.global_config["thinker_config"].get("vision_config")

    def get_audio_config(self) -> dict[str, Any] | None:
        return self.global_config["thinker_config"].get("audio_config")

    def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
        # SinusoidsPositionEmbedding
        assert self.hparams_audio is not None
        max_timescale = 10000
        length = 1500
        channels = self.hparams_audio["hidden_size"]
        log_timescale_increment = np.log(max_timescale) / (channels // 2 - 1)
        inv_timescales = torch.exp(-log_timescale_increment * torch.arange(channels // 2).float())
        scaled_time = torch.arange(length)[:, np.newaxis] * inv_timescales[np.newaxis, :]
        pos_embd = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=1).to(dtype=torch.float32)
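        # standard Whisper-style sinusoidal table of shape (length, channels):
        # rows are positions, the first channels // 2 columns are sines and the
        # remaining columns cosines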
        yield ("audio_tower.embed_positions.weight", pos_embd)

    def tensor_force_quant(self, name, new_name, bid, n_dims):
        del bid, new_name, n_dims  # unused
        if ".conv" in name and ".weight" in name:
            return gguf.GGMLQuantizationType.F16
        return False

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        if name.startswith("thinker."):
            name = name.replace("thinker.", "")

        if name.startswith("audio_tower"):
            # process audio tensors
            if "conv1.bias" in name or "conv2.bias" in name:
                # transpose conv1 and conv2 bias
                data_torch = data_torch.unsqueeze(-1)
            if "audio_bos_eos_token" in name:
                # this tensor is left unused in transformers code
                # https://github.com/huggingface/transformers/blob/6e3063422c4b1c014aa60c32b9254fd2902f0f28/src/transformers/models/qwen2_5_omni/modular_qwen2_5_omni.py#L1809
                return []
            return [(self.map_tensor_name(name), data_torch)]

        return super().modify_tensors(data_torch, name, bid)


@ModelBase.register("InternVisionModel")
class InternVisionModel(MmprojModel):
    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        hparams = self.hparams
        self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.INTERNVL)
        self.gguf_writer.add_vision_attention_layernorm_eps(hparams["layer_norm_eps"])
        # hidden_act
        if hparams["hidden_act"] == "silu":
            self.gguf_writer.add_vision_use_silu(True)
        elif hparams["hidden_act"] == "gelu":
            self.gguf_writer.add_vision_use_gelu(True)
        else:
            raise ValueError(f"Unsupported hidden_act: {hparams['hidden_act']}")
        # downsample_ratio
        downsample_ratio = self.global_config.get("downsample_ratio")
        assert downsample_ratio is not None
        self.gguf_writer.add_vision_projector_scale_factor(int(1.0 / downsample_ratio))

    def tensor_force_quant(self, name, new_name, bid, n_dims):
        del bid, name, n_dims  # unused
        if ".patch_embd." in new_name:
            return gguf.GGMLQuantizationType.F16
        if ".position_embd." in new_name:
            return gguf.GGMLQuantizationType.F32
        return False

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused
        if name.startswith("vision_model") or name.startswith("mlp"):
            # process visual tensors
            # correct name
            if name.startswith("vision_model"):
                name = "vision_tower." + name
            if (".ls" in name or "position_embedding" in name) and not name.endswith(".weight"):
                name += ".weight"
            # split QKV tensors if needed
            if ".qkv." in name:
                if data_torch.ndim == 2:  # weight
                    c3, _ = data_torch.shape
                else:  # bias
                    c3 = data_torch.shape[0]
                assert c3 % 3 == 0
                c = c3 // 3
                wq = data_torch[:c]
                wk = data_torch[c: c * 2]
                wv = data_torch[c * 2:]
                return [
                    (self.map_tensor_name(name.replace("attn.qkv", "self_attn.q_proj")), wq),
                    (self.map_tensor_name(name.replace("attn.qkv", "self_attn.k_proj")), wk),
                    (self.map_tensor_name(name.replace("attn.qkv", "self_attn.v_proj")), wv),
                ]
            return [(self.map_tensor_name(name), data_torch)]
        return []  # skip other tensors


@ModelBase.register("WavTokenizerDec")
class WavTokenizerDecModel(TextModel):
    model_arch = gguf.MODEL_ARCH.WAVTOKENIZER_DEC

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        if \
                name.endswith("codebook.cluster_size") or \
                name.endswith("codebook.embed_avg") or \
                name.endswith("codebook.inited"):
            logger.debug(f"Skipping {name!r}")
            return []

        logger.info(f"{self.map_tensor_name(name)} -> {data_torch.shape}")

        return [(self.map_tensor_name(name), data_torch)]

    def set_vocab(self):
        self._set_vocab_none()

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_vocab_size         (self.hparams["vocab_size"])
        self.gguf_writer.add_features_length    (self.hparams["n_embd_features"])
        self.gguf_writer.add_feed_forward_length(self.hparams["n_ff"])
        self.gguf_writer.add_group_norm_eps     (self.hparams["group_norm_epsilon"])
        self.gguf_writer.add_group_norm_groups  (self.hparams["group_norm_groups"])

        self.gguf_writer.add_posnet_embedding_length(self.hparams["posnet"]["n_embd"])
        self.gguf_writer.add_posnet_block_count     (self.hparams["posnet"]["n_layer"])

        self.gguf_writer.add_convnext_embedding_length(self.hparams["convnext"]["n_embd"])
        self.gguf_writer.add_convnext_block_count     (self.hparams["convnext"]["n_layer"])

        self.gguf_writer.add_causal_attention(False)


@ModelBase.register("Qwen2MoeForCausalLM")
class Qwen2MoeModel(TextModel):
    model_arch = gguf.MODEL_ARCH.QWEN2MOE

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        if (n_experts := self.hparams.get("num_experts")) is not None:
            self.gguf_writer.add_expert_count(n_experts)
        if (moe_intermediate_size := self.hparams.get("moe_intermediate_size")) is not None:
            self.gguf_writer.add_expert_feed_forward_length(moe_intermediate_size)
            logger.info(f"gguf: expert feed forward length = {moe_intermediate_size}")
        if (shared_expert_intermediate_size := self.hparams.get('shared_expert_intermediate_size')) is not None:
            self.gguf_writer.add_expert_shared_feed_forward_length(shared_expert_intermediate_size)
            logger.info(f"gguf: expert shared feed forward length = {shared_expert_intermediate_size}")
        # YaRN is not enabled by default
        # To enable it, please refer to this guide: https://huggingface.co/Qwen/Qwen3-30B-A3B#processing-long-texts
        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
            self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"])

    _experts: list[dict[str, Tensor]] | None = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # process the experts separately
        if name.find("experts") != -1:
            n_experts = self.hparams["num_experts"]
            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for w_name in ["down_proj", "gate_proj", "up_proj"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)

                    merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"

                    new_name = self.map_tensor_name(merged_name)

                    tensors.append((new_name, data_torch))
                return tensors
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")


@ModelBase.register("Qwen3ForCausalLM")
class Qwen3Model(Qwen2Model):
    model_arch = gguf.MODEL_ARCH.QWEN3


@ModelBase.register("Qwen3MoeForCausalLM")
class Qwen3MoeModel(Qwen2MoeModel):
    model_arch = gguf.MODEL_ARCH.QWEN3MOE


@ModelBase.register("GPT2LMHeadModel")
class GPT2Model(TextModel):
    model_arch = gguf.MODEL_ARCH.GPT2

    def set_gguf_parameters(self):
        self.gguf_writer.add_block_count(self.hparams["n_layer"])
        self.gguf_writer.add_context_length(self.hparams["n_ctx"])
        self.gguf_writer.add_embedding_length(self.hparams["n_embd"])
        self.gguf_writer.add_feed_forward_length(4 * self.hparams["n_embd"])
        self.gguf_writer.add_head_count(self.hparams["n_head"])
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        tensors: list[tuple[str, Tensor]] = []

        # we don't need these
        if name.endswith((".attn.bias", ".attn.masked_bias")):
            return tensors

        # GPT-2 stores these as HF Conv1D modules, whose weights are transposed
        # relative to nn.Linear, so transpose them back
        if name.endswith((".c_attn.weight", ".c_proj.weight", ".c_fc.weight")):
            data_torch = data_torch.transpose(1, 0)

        new_name = self.map_tensor_name(name)

        tensors.append((new_name, data_torch))

        return tensors


@ModelBase.register("PhiForCausalLM")
class Phi2Model(TextModel):
    model_arch = gguf.MODEL_ARCH.PHI2

    def set_gguf_parameters(self):
        block_count = self.find_hparam(["num_hidden_layers", "n_layer"])

        rot_pct = self.find_hparam(["partial_rotary_factor"])
        n_embd = self.find_hparam(["hidden_size", "n_embd"])
        n_head = self.find_hparam(["num_attention_heads", "n_head"])

        self.gguf_writer.add_context_length(self.find_hparam(["n_positions", "max_position_embeddings"]))

        self.gguf_writer.add_embedding_length(n_embd)
        self.gguf_writer.add_feed_forward_length(4 * n_embd)
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_head_count(n_head)
        self.gguf_writer.add_head_count_kv(n_head)
        self.gguf_writer.add_layer_norm_eps(self.find_hparam(["layer_norm_epsilon", "layer_norm_eps"]))
        self.gguf_writer.add_rope_dimension_count(int(rot_pct * n_embd) // n_head)
        self.gguf_writer.add_file_type(self.ftype)
        self.gguf_writer.add_add_bos_token(False)


@ModelBase.register("Phi3ForCausalLM")
class Phi3MiniModel(TextModel):
    model_arch = gguf.MODEL_ARCH.PHI3

    def set_vocab(self):
        # Phi-4 model uses GPT2Tokenizer
        tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
        if tokenizer_config_file.is_file():
            with open(tokenizer_config_file, "r", encoding="utf-8") as f:
                tokenizer_config_json = json.load(f)
                tokenizer_class = tokenizer_config_json['tokenizer_class']
                if tokenizer_class == 'GPT2Tokenizer':
                    return self._set_vocab_gpt2()
  2590. from sentencepiece import SentencePieceProcessor
  2591. tokenizer_path = self.dir_model / 'tokenizer.model'
  2592. if not tokenizer_path.is_file():
  2593. raise ValueError(f'Error: Missing {tokenizer_path}')
  2594. tokenizer = SentencePieceProcessor()
  2595. tokenizer.LoadFromFile(str(tokenizer_path))
  2596. vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())
  2597. tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
  2598. scores: list[float] = [-10000.0] * vocab_size
  2599. toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size
  2600. for token_id in range(tokenizer.vocab_size()):
  2601. piece = tokenizer.IdToPiece(token_id)
  2602. text = piece.encode("utf-8")
  2603. score = tokenizer.GetScore(token_id)
  2604. toktype = SentencePieceTokenTypes.NORMAL
  2605. if tokenizer.IsUnknown(token_id):
  2606. toktype = SentencePieceTokenTypes.UNKNOWN
  2607. elif tokenizer.IsControl(token_id):
  2608. toktype = SentencePieceTokenTypes.CONTROL
  2609. elif tokenizer.IsUnused(token_id):
  2610. toktype = SentencePieceTokenTypes.UNUSED
  2611. elif tokenizer.IsByte(token_id):
  2612. toktype = SentencePieceTokenTypes.BYTE
  2613. tokens[token_id] = text
  2614. scores[token_id] = score
  2615. toktypes[token_id] = toktype
  2616. added_tokens_file = self.dir_model / 'added_tokens.json'
  2617. if added_tokens_file.is_file():
  2618. with open(added_tokens_file, "r", encoding="utf-8") as f:
  2619. added_tokens_json = json.load(f)
  2620. for key in added_tokens_json:
  2621. token_id = added_tokens_json[key]
  2622. if token_id >= vocab_size:
  2623. logger.debug(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
  2624. continue
  2625. tokens[token_id] = key.encode("utf-8")
  2626. scores[token_id] = -1000.0
  2627. toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
  2628. tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
  2629. if tokenizer_config_file.is_file():
  2630. with open(tokenizer_config_file, "r", encoding="utf-8") as f:
  2631. tokenizer_config_json = json.load(f)
  2632. added_tokens_decoder = tokenizer_config_json.get("added_tokens_decoder", {})
  2633. for token_id, foken_data in added_tokens_decoder.items():
  2634. token_id = int(token_id)
  2635. token = foken_data["content"].encode("utf-8")
  2636. if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
  2637. if tokens[token_id] != token:
  2638. logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token.decode("utf-8")!r}')
  2639. tokens[token_id] = token
  2640. scores[token_id] = -1000.0
  2641. toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
  2642. if foken_data.get("special"):
  2643. toktypes[token_id] = SentencePieceTokenTypes.CONTROL
  2644. tokenizer_file = self.dir_model / 'tokenizer.json'
  2645. if tokenizer_file.is_file():
  2646. with open(tokenizer_file, "r", encoding="utf-8") as f:
  2647. tokenizer_json = json.load(f)
  2648. added_tokens = tokenizer_json.get("added_tokens", [])
  2649. for foken_data in added_tokens:
  2650. token_id = int(foken_data["id"])
  2651. token = foken_data["content"].encode("utf-8")
  2652. if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
  2653. if tokens[token_id] != token:
  2654. logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token.decode("utf-8")!r}')
  2655. tokens[token_id] = token
  2656. scores[token_id] = -1000.0
  2657. toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
  2658. if foken_data.get("special"):
  2659. toktypes[token_id] = SentencePieceTokenTypes.CONTROL
        self.gguf_writer.add_tokenizer_model("llama")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        block_count = self.find_hparam(["num_hidden_layers", "n_layer"])

        n_embd = self.find_hparam(["hidden_size", "n_embd"])
        n_head = self.find_hparam(["num_attention_heads", "n_head"])
        n_head_kv = self.find_hparam(["num_key_value_heads", "n_head_kv"])
        rms_eps = self.find_hparam(["rms_norm_eps"])
        max_pos_embds = self.find_hparam(["n_positions", "max_position_embeddings"])
        orig_max_pos_embds = self.find_hparam(["original_max_position_embeddings"])
        rot_pct = self.hparams.get("partial_rotary_factor", 1.0)
        rope_dims = int(rot_pct * n_embd) // n_head

        self.gguf_writer.add_context_length(max_pos_embds)
        self.gguf_writer.add_rope_scaling_orig_ctx_len(orig_max_pos_embds)
        self.gguf_writer.add_embedding_length(n_embd)
        self.gguf_writer.add_feed_forward_length(self.find_hparam(["intermediate_size"]))
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_head_count(n_head)
        self.gguf_writer.add_head_count_kv(n_head_kv)
        self.gguf_writer.add_layer_norm_rms_eps(rms_eps)
        self.gguf_writer.add_rope_dimension_count(rope_dims)
        self.gguf_writer.add_rope_freq_base(self.find_hparam(["rope_theta"]))
        self.gguf_writer.add_file_type(self.ftype)

        sliding_window = self.hparams.get("sliding_window")
        # use zero value of sliding_window to distinguish Phi-4 from other PHI3 models
        if sliding_window is None:
            sliding_window = 0
        self.gguf_writer.add_sliding_window(sliding_window)

    def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
        n_embd = self.find_hparam(["hidden_size", "n_embd"])
        n_head = self.find_hparam(["num_attention_heads", "n_head"])
        max_pos_embds = self.find_hparam(["n_positions", "max_position_embeddings"])
        orig_max_pos_embds = self.find_hparam(["original_max_position_embeddings"])
        rot_pct = self.hparams.get("partial_rotary_factor", 1.0)
        rope_dims = int(rot_pct * n_embd) // n_head

        # write rope scaling for long context (128k) model
        rope_scaling = self.find_hparam(['rope_scaling'], True)
        if rope_scaling is None:
            return

        scale = max_pos_embds / orig_max_pos_embds

        rope_scaling_type = rope_scaling.get('rope_type', rope_scaling.get('type', '')).lower()
        if len(rope_scaling_type) == 0:
            raise KeyError('Missing the required key rope_scaling.type')
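
        # the attention factor compensates for the extended context; the two
        # branches below follow the LongRoPE ('su') and YaRN scaling formulas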
        if rope_scaling_type == 'su' or rope_scaling_type == 'longrope':
            attn_factor = math.sqrt(1 + math.log(scale) / math.log(orig_max_pos_embds)) if scale > 1.0 else 1.0
        elif rope_scaling_type == 'yarn':
            attn_factor = 0.1 * math.log(scale) + 1.0 if scale > 1.0 else 1.0
        else:
            raise NotImplementedError(f'The rope scaling type {rope_scaling_type} is not supported yet')

        self.gguf_writer.add_rope_scaling_attn_factors(attn_factor)

        long_factors = rope_scaling.get('long_factor', None)
        short_factors = rope_scaling.get('short_factor', None)

        if long_factors is None or short_factors is None:
            raise KeyError('Missing the required key rope_scaling.long_factor or rope_scaling.short_factor')

        if len(long_factors) != len(short_factors) or len(long_factors) != rope_dims // 2:
            raise ValueError(f'The length of rope long and short factors must be {rope_dims // 2}. long_factors = {len(long_factors)}, short_factors = {len(short_factors)}.')

        yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FACTORS_LONG), torch.tensor(long_factors, dtype=torch.float32))
        yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FACTORS_SHORT), torch.tensor(short_factors, dtype=torch.float32))


@ModelBase.register("PhiMoEForCausalLM")
class PhiMoeModel(Phi3MiniModel):
    model_arch = gguf.MODEL_ARCH.PHIMOE

    _experts: list[dict[str, Tensor]] | None = None

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_expert_used_count(self.hparams["num_experts_per_tok"])
        self.gguf_writer.add_expert_count(self.hparams["num_local_experts"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # process the experts separately
        if name.find("block_sparse_moe.experts") != -1:
            n_experts = self.hparams["num_local_experts"]
            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for w_name in ["w1", "w2", "w3"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.{w_name}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)

                    merged_name = f"model.layers.{bid}.block_sparse_moe.experts.{w_name}.weight"

                    new_name = self.map_tensor_name(merged_name)

                    tensors.append((new_name, data_torch))
                return tensors
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")


@ModelBase.register("PlamoForCausalLM")
class PlamoModel(TextModel):
    model_arch = gguf.MODEL_ARCH.PLAMO

    def set_vocab(self):
        self._set_vocab_sentencepiece()

    def set_gguf_parameters(self):
        hparams = self.hparams
        block_count = hparams["num_hidden_layers"]

        self.gguf_writer.add_context_length(4096)  # not in config.json
        self.gguf_writer.add_embedding_length(hparams["hidden_size"])
        self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_head_count(hparams["num_attention_heads"])
        self.gguf_writer.add_head_count_kv(5)  # hparams["num_key_value_heads"] is wrong
        self.gguf_writer.add_layer_norm_rms_eps(hparams["rms_norm_eps"])
        self.gguf_writer.add_file_type(self.ftype)
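
    # PLaMo stores the attention Q and output projections in an interleaved
    # head order; the helpers below reorder the 5120x5120 matrices (presumably
    # 5 KV groups x 8 query heads x 128 head dims) so that GQA broadcasting in
    # ggml_mul_mat sees contiguous groups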
    def shuffle_attn_q_weight(self, data_torch):
        assert data_torch.size() == (5120, 5120)
        data_torch = data_torch.reshape(8, 5, 128, 5120)
        data_torch = torch.permute(data_torch, (1, 0, 2, 3))
        data_torch = torch.reshape(data_torch, (5120, 5120))
        return data_torch

    def shuffle_attn_output_weight(self, data_torch):
        assert data_torch.size() == (5120, 5120)
        data_torch = data_torch.reshape(5120, 8, 5, 128)
        data_torch = torch.permute(data_torch, (0, 2, 1, 3))
        data_torch = torch.reshape(data_torch, (5120, 5120))
        return data_torch

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        new_name = self.map_tensor_name(name)

        # shuffle for broadcasting of gqa in ggml_mul_mat
        if new_name.endswith("attn_q.weight"):
            data_torch = self.shuffle_attn_q_weight(data_torch)
        elif new_name.endswith("attn_output.weight"):
            data_torch = self.shuffle_attn_output_weight(data_torch)

        return [(new_name, data_torch)]


@ModelBase.register("CodeShellForCausalLM")
class CodeShellModel(TextModel):
    model_arch = gguf.MODEL_ARCH.CODESHELL

    def set_gguf_parameters(self):
        block_count = self.hparams["n_layer"]

        self.gguf_writer.add_context_length(self.hparams["n_positions"])
        self.gguf_writer.add_embedding_length(self.hparams["n_embd"])
        self.gguf_writer.add_feed_forward_length(4 * self.hparams["n_embd"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_head_count(self.hparams["n_head"])
        self.gguf_writer.add_head_count_kv(self.hparams["num_query_groups"])
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)
        self.gguf_writer.add_rope_freq_base(10000.0)
        self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
        self.gguf_writer.add_rope_scaling_factor(1.0)

    _has_tok_embd = False
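
    # `_has_tok_embd` tracks whether token_embd.weight has been emitted yet;
    # if output.weight shows up first, the embeddings are assumed to be tied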
    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        output_name = self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT)
        tok_embd_name = self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD)

        new_name = self.map_tensor_name(name)

        # assuming token_embd.weight is seen before output.weight
        if not self._has_tok_embd and new_name == self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT):
            # even though the tensor files do not contain the word embeddings, they are still in the weight map
            if self.tensor_names and "transformer.wte.weight" in self.tensor_names:
                logger.debug(f"{tok_embd_name} not found before {output_name}, assuming they are tied")
                self.tensor_names.remove("transformer.wte.weight")
        elif new_name == tok_embd_name:
            self._has_tok_embd = True

        return [(new_name, data_torch)]


@ModelBase.register("InternLM2ForCausalLM")
class InternLM2Model(TextModel):
    model_arch = gguf.MODEL_ARCH.INTERNLM2

    def set_vocab(self):
        # (TODO): Is there a better way?
        # Copied from _set_vocab_sentencepiece; the only difference is that we treat the character
        # \x00 specially and convert it into an emoji character, to prevent it from being mistakenly
        # recognized as an empty string in C++.
        from sentencepiece import SentencePieceProcessor
        from sentencepiece import sentencepiece_model_pb2 as model

        tokenizer_path = self.dir_model / 'tokenizer.model'

        tokens: list[bytes] = []
        scores: list[float] = []
        toktypes: list[int] = []

        if not tokenizer_path.is_file():
            logger.error(f'Error: Missing {tokenizer_path}')
            sys.exit(1)
        sentencepiece_model = model.ModelProto()  # pyright: ignore[reportAttributeAccessIssue]
        sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read())
        add_prefix = sentencepiece_model.normalizer_spec.add_dummy_prefix

        tokenizer = SentencePieceProcessor()
        tokenizer.LoadFromFile(str(tokenizer_path))

        vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())

        for token_id in range(vocab_size):
            piece = tokenizer.IdToPiece(token_id)
            text = piece.encode("utf-8")
            score = tokenizer.GetScore(token_id)
            if text == b"\x00":
                # (TODO): fixme
                # Hack here and replace the \x00 characters.
                logger.warning(f"InternLM2 converting token '{text}' to '🐉'!")
                text = "🐉".encode("utf-8")

            toktype = SentencePieceTokenTypes.NORMAL
            if tokenizer.IsUnknown(token_id):
                toktype = SentencePieceTokenTypes.UNKNOWN
            elif tokenizer.IsControl(token_id):
                toktype = SentencePieceTokenTypes.CONTROL
            elif tokenizer.IsUnused(token_id):
                toktype = SentencePieceTokenTypes.UNUSED
            elif tokenizer.IsByte(token_id):
                toktype = SentencePieceTokenTypes.BYTE
            # take care of unused raw token
            if piece.startswith('[UNUSED'):
                toktype = SentencePieceTokenTypes.UNUSED

            tokens.append(text)
            scores.append(score)
            toktypes.append(toktype)
        added_tokens_file = self.dir_model / 'added_tokens.json'
        if added_tokens_file.is_file():
            with open(added_tokens_file, "r", encoding="utf-8") as f:
                added_tokens_json = json.load(f)

                for key in added_tokens_json:
                    tokens.append(key.encode("utf-8"))
                    scores.append(-1000.0)
                    toktypes.append(SentencePieceTokenTypes.USER_DEFINED)
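
        # remember the id of '<|im_end|>' so the eos token of chat models
        # can be remapped to it after the vocab has been collected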
        chat_eos_token = '<|im_end|>'
        chat_eos_token_id = None

        tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
        if tokenizer_config_file.is_file():
            with open(tokenizer_config_file, "r", encoding="utf-8") as f:
                tokenizer_config_json = json.load(f)
                added_tokens_decoder = tokenizer_config_json.get("added_tokens_decoder", {})
                for token_id, token_data in added_tokens_decoder.items():
                    token_id = int(token_id)
                    token = token_data["content"]
                    if token == chat_eos_token:
                        chat_eos_token_id = token_id
                    token = token.encode("utf-8")
                    if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
                        if tokens[token_id] != token:
                            logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token.decode("utf-8")!r}')
                    tokens[token_id] = token
                    scores[token_id] = -1000.0
                    toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
                    if token_data.get("special"):
                        toktypes[token_id] = SentencePieceTokenTypes.CONTROL
        tokenizer_file = self.dir_model / 'tokenizer.json'
        if tokenizer_file.is_file():
            with open(tokenizer_file, "r", encoding="utf-8") as f:
                tokenizer_json = json.load(f)
                added_tokens = tokenizer_json.get("added_tokens", [])
                for token_data in added_tokens:
                    token_id = int(token_data["id"])
                    token = token_data["content"]
                    if token == chat_eos_token:
                        chat_eos_token_id = token_id
                    token = token.encode("utf-8")
                    if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
                        if tokens[token_id] != token:
                            logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token.decode("utf-8")!r}')
                    tokens[token_id] = token
                    scores[token_id] = -1000.0
                    toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
                    if token_data.get("special"):
                        toktypes[token_id] = SentencePieceTokenTypes.CONTROL
        self.gguf_writer.add_tokenizer_model("llama")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)
        self.gguf_writer.add_add_space_prefix(add_prefix)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        old_eos = special_vocab.special_token_ids["eos"]
        if chat_eos_token_id is not None:
            # For the chat model, we replace the eos with '<|im_end|>'.
            # TODO: this is a hack, should be fixed
            # https://github.com/ggml-org/llama.cpp/pull/6745#issuecomment-2067687048
            special_vocab.special_token_ids["eos"] = chat_eos_token_id
            logger.warning(f"Replace eos:{old_eos} with a special token:{chat_eos_token_id}"
                           " in chat mode so that the conversation can end normally.")

        special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"])
        self.gguf_writer.add_block_count(self.hparams["num_hidden_layers"])
        self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
        self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
        self.gguf_writer.add_rope_freq_base(self.hparams["rope_theta"])
        self.gguf_writer.add_head_count(self.hparams["num_attention_heads"])
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
        self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"])
        self.gguf_writer.add_file_type(self.ftype)

        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "linear" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        num_heads = self.hparams["num_attention_heads"]
        num_kv_heads = self.hparams["num_key_value_heads"]
        n_embd = self.hparams["hidden_size"]
        q_per_kv = num_heads // num_kv_heads
        head_dim = n_embd // num_heads
        num_groups = num_heads // q_per_kv

        name = name.replace("language_model.", "")  # InternVL
        if name.startswith("mlp") or name.startswith("vision_model"):
            # skip visual tensors
            return []

        if bid is not None and f"model.layers.{bid}.attention.wqkv" in name:
            qkv = data_torch
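            # the fused wqkv tensor packs, for each KV group, q_per_kv query
            # rows followed by one key row and one value row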
            qkv = qkv.reshape((num_groups, q_per_kv + 2, head_dim, n_embd))
            q, k, v = qkv[:, : q_per_kv], qkv[:, -2], qkv[:, -1]

            # The model weights of q and k require additional reshape.
            q = LlamaModel.permute(q.reshape((-1, q.shape[-1])), num_heads, num_heads)
            k = LlamaModel.permute(k.reshape((-1, k.shape[-1])), num_heads, num_kv_heads)
            v = v.reshape((-1, v.shape[-1]))

            return [
                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid), q),
                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid), k),
                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid), v),
            ]
        else:
            return [(self.map_tensor_name(name), data_torch)]


@ModelBase.register("InternLM3ForCausalLM")
class InternLM3Model(TextModel):
    model_arch = gguf.MODEL_ARCH.LLAMA

    def set_vocab(self):
        tokens, scores, toktypes = self._create_vocab_sentencepiece()
        self.gguf_writer.add_tokenizer_model("llama")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))

        tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
        if tokenizer_config_file.is_file():
            with open(tokenizer_config_file, "r", encoding="utf-8") as f:
                tokenizer_config_json = json.load(f)
                if "add_prefix_space" in tokenizer_config_json:
                    self.gguf_writer.add_add_space_prefix(tokenizer_config_json["add_prefix_space"])

                if "added_tokens_decoder" in tokenizer_config_json:
                    for token_id, token_data in tokenizer_config_json["added_tokens_decoder"].items():
                        if token_data.get("special"):
                            token_id = int(token_id)
                            token = token_data["content"]
                            special_vocab._set_special_token(token, token_id)
                            # update eos token
                            if token == '<|im_end|>' and "eos" in special_vocab.special_token_ids:
                                special_vocab.special_token_ids["eos"] = token_id

        special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        hparams = self.hparams
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])

        if "head_dim" in hparams:
            rope_dim = hparams["head_dim"]
        else:
            rope_dim = hparams["hidden_size"] // hparams["num_attention_heads"]
        self.gguf_writer.add_rope_dimension_count(rope_dim)

        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "linear" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        n_head = self.hparams["num_attention_heads"]
        n_kv_head = self.hparams.get("num_key_value_heads")
        name = name.replace("language_model.", "")  # InternVL
        if name.startswith("mlp") or name.startswith("vision_model"):
            # skip visual tensors
            return []
        if name.endswith(("q_proj.weight", "q_proj.bias")):
            data_torch = LlamaModel.permute(data_torch, n_head, n_head)
        if name.endswith(("k_proj.weight", "k_proj.bias")):
            data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)
        return [(self.map_tensor_name(name), data_torch)]


@ModelBase.register("BertModel", "BertForMaskedLM", "CamembertModel", "BertForSequenceClassification")
class BertModel(TextModel):
    model_arch = gguf.MODEL_ARCH.BERT

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.vocab_size = None

        if cls_out_labels := self.hparams.get("id2label"):
            if len(cls_out_labels) == 2 and cls_out_labels["0"] == "LABEL_0":
                # Remove dummy labels added by AutoConfig
                cls_out_labels = None
        self.cls_out_labels = cls_out_labels

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_causal_attention(False)
        self._try_set_pooling_type()

        if self.cls_out_labels:
            self.gguf_writer.add_classifier_output_labels([v for k, v in sorted(self.cls_out_labels.items())])

    def set_vocab(self):
        tokens, toktypes, tokpre = self.get_vocab_base()
        self.vocab_size = len(tokens)

        # we need this to validate the size of the token_type embeddings
        # though currently we are passing all zeros to the token_type embeddings
        # "Sequence A" or "Sequence B"
        self.gguf_writer.add_token_type_count(self.hparams.get("type_vocab_size", 1))

        # convert to phantom space vocab
        def phantom(tok):
            if tok.startswith("[") and tok.endswith("]"):
                return tok
            if tok.startswith("##"):
                return tok[2:]
            return "\u2581" + tok
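        # e.g. "hello" -> "\u2581hello", "##ing" -> "ing", "[CLS]" stays as-is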
        tokens = list(map(phantom, tokens))

        # add vocab to gguf
        self.gguf_writer.add_tokenizer_model("bert")
        self.gguf_writer.add_tokenizer_pre(tokpre)
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)

        # handle special tokens
        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        if name.startswith("bert."):
            name = name[5:]

        if name.endswith(".gamma"):
            name = name[:-6] + ".weight"

        if name.endswith(".beta"):
            name = name[:-5] + ".bias"

        # we are only using BERT for embeddings so we don't need the pooling layer
        if name in ("embeddings.position_ids", "pooler.dense.weight", "pooler.dense.bias"):
            return []  # we don't need these

        if name.startswith("cls.predictions"):
            return []

        if name.startswith("cls.seq_relationship"):
            return []

        if self.cls_out_labels:
            # For BertForSequenceClassification (direct projection layer)
            if name == "classifier.weight":
                name = "classifier.out_proj.weight"

            if name == "classifier.bias":
                name = "classifier.out_proj.bias"

        return [(self.map_tensor_name(name), data_torch)]

    def _xlmroberta_tokenizer_init(self) -> None:
        # we need the pad_token_id to know how to chop down position_embd matrix
        if (pad_token_id := self.hparams.get("pad_token_id")) is not None:
            self._position_offset = 1 + pad_token_id
            if "max_position_embeddings" in self.hparams:
                self.hparams["max_position_embeddings"] -= self._position_offset
        else:
            self._position_offset = None

    def _xlmroberta_set_vocab(self) -> None:
        # to avoid TypeError: Descriptors cannot be created directly
        # exception when importing sentencepiece_model_pb2
        os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
        from sentencepiece import SentencePieceProcessor
        from sentencepiece import sentencepiece_model_pb2 as model

        tokenizer_path = self.dir_model / 'sentencepiece.bpe.model'

        tokenizer_json = {}
        tokenizer_config_json = {}
        if not tokenizer_path.is_file():
            tokenizer_path = self.dir_model / 'tokenizer.json'
            tokenizer_config_path = self.dir_model / 'tokenizer_config.json'

            if not tokenizer_path.is_file():
                raise FileNotFoundError(f"File not found: {tokenizer_path}")

            from base64 import b64decode
            from transformers import AutoTokenizer
            tokenizer = AutoTokenizer.from_pretrained(self.dir_model)

            with open(tokenizer_path, "r", encoding="utf-8") as fp:
                tokenizer_json = json.load(fp)

            if tokenizer_config_path.is_file():
                with open(tokenizer_config_path, "r", encoding="utf-8") as fp:
                    tokenizer_config_json = json.load(fp)

            add_prefix = tokenizer.add_prefix_space
            remove_whitespaces = tokenizer.clean_up_tokenization_spaces
            precompiled_charsmap = b64decode(tokenizer_json["normalizer"]["precompiled_charsmap"])

            vocab_size = max(self.hparams.get("vocab_size", 0), tokenizer.vocab_size)
        else:
            sentencepiece_model = model.ModelProto()  # pyright: ignore[reportAttributeAccessIssue]
            sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read())
            assert sentencepiece_model.trainer_spec.model_type == 1  # UNIGRAM

            add_prefix = sentencepiece_model.normalizer_spec.add_dummy_prefix
            remove_whitespaces = sentencepiece_model.normalizer_spec.remove_extra_whitespaces
            precompiled_charsmap = sentencepiece_model.normalizer_spec.precompiled_charsmap

            tokenizer = SentencePieceProcessor()
            tokenizer.LoadFromFile(str(tokenizer_path))

            vocab_size = max(self.hparams.get("vocab_size", 0), tokenizer.vocab_size())

        tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
        scores: list[float] = [-10000.0] * vocab_size
        toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size

        if isinstance(tokenizer, SentencePieceProcessor):
            for token_id in range(tokenizer.vocab_size()):
                piece = tokenizer.IdToPiece(token_id)
                text = piece.encode("utf-8")
                score = tokenizer.GetScore(token_id)

                toktype = SentencePieceTokenTypes.NORMAL
                if tokenizer.IsUnknown(token_id):
                    toktype = SentencePieceTokenTypes.UNKNOWN
                elif tokenizer.IsControl(token_id):
                    toktype = SentencePieceTokenTypes.CONTROL
                elif tokenizer.IsUnused(token_id):
                    toktype = SentencePieceTokenTypes.UNUSED
                elif tokenizer.IsByte(token_id):
                    toktype = SentencePieceTokenTypes.BYTE

                tokens[token_id] = text
                scores[token_id] = score
                toktypes[token_id] = toktype
        else:
            added_vocab = tokenizer.get_added_vocab()
            unk_token = tokenizer_config_json.get("unk_token")
            unk_token_id = added_vocab.get(unk_token, tokenizer_json["model"].get("unk_id", 3))

            for token_id in range(tokenizer.vocab_size):
                if (piece := tokenizer._convert_id_to_token(token_id)) is not None:
                    text = piece.encode("utf-8")
                    score = tokenizer_json["model"]["vocab"][token_id][1]

                    toktype = SentencePieceTokenTypes.NORMAL
                    if token_id == unk_token_id:
                        toktype = SentencePieceTokenTypes.UNKNOWN
                    elif token_id in tokenizer.all_special_ids:
                        toktype = SentencePieceTokenTypes.CONTROL
                    elif token_id in added_vocab.values():
                        toktype = SentencePieceTokenTypes.USER_DEFINED
                    # No reliable way to detect this, but jina doesn't have any
                    # elif tokenizer.IsByte(token_id):
                    #     toktype = SentencePieceTokenTypes.BYTE

                    tokens[token_id] = text
                    scores[token_id] = score
                    toktypes[token_id] = toktype

        if isinstance(tokenizer, SentencePieceProcessor):
            # realign tokens (see HF tokenizer code)
            tokens = [b'<s>', b'<pad>', b'</s>', b'<unk>'] + tokens[3:-1]
            scores = [0.0, 0.0, 0.0, 0.0] + scores[3:-1]
            toktypes = [
                SentencePieceTokenTypes.CONTROL,
                SentencePieceTokenTypes.CONTROL,
                SentencePieceTokenTypes.CONTROL,
                SentencePieceTokenTypes.UNKNOWN,
            ] + toktypes[3:-1]

        if self.model_arch == gguf.MODEL_ARCH.NOMIC_BERT_MOE:
            # Add mask token missing from sentencepiece.bpe.model
            tokens[250001] = b'<mask>'
            scores[250001] = 0.0
            toktypes[250001] = SentencePieceTokenTypes.CONTROL

        self.gguf_writer.add_tokenizer_model("t5")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)
        self.gguf_writer.add_add_space_prefix(add_prefix)
        self.gguf_writer.add_token_type_count(self.hparams.get("type_vocab_size", 1))
        self.gguf_writer.add_remove_extra_whitespaces(remove_whitespaces)
        if precompiled_charsmap:
            self.gguf_writer.add_precompiled_charsmap(precompiled_charsmap)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

        self.gguf_writer.add_add_bos_token(True)
        self.gguf_writer.add_add_eos_token(True)


@ModelBase.register("DistilBertModel", "DistilBertForMaskedLM", "DistilBertForSequenceClassification")
class DistilBertModel(BertModel):
    model_arch = gguf.MODEL_ARCH.BERT

    def set_gguf_parameters(self):
        self.gguf_writer.add_layer_norm_eps(1e-12)
        logger.info("gguf: layer norm epsilon = 1e-12")
        super().set_gguf_parameters()

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        if name.startswith("distilbert."):
            name = name[11:]

        # These layers act as MLM head, so we don't need them
        if name.startswith("vocab_"):
            return []

        return super().modify_tensors(data_torch, name, bid)


@ModelBase.register("RobertaModel", "RobertaForSequenceClassification")
class RobertaModel(BertModel):
    model_arch = gguf.MODEL_ARCH.BERT

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # we need the pad_token_id to know how to chop down position_embd matrix
        if (pad_token_id := self.hparams.get("pad_token_id")) is not None:
            self._position_offset = 1 + pad_token_id
            if "max_position_embeddings" in self.hparams:
                self.hparams["max_position_embeddings"] -= self._position_offset
        else:
            self._position_offset = None

    def set_vocab(self):
        """Support BPE tokenizers for roberta models"""
        bpe_tok_path = self.dir_model / "tokenizer.json"
        if bpe_tok_path.exists():
            self._set_vocab_gpt2()
            self.gguf_writer.add_add_bos_token(True)
            self.gguf_writer.add_add_eos_token(True)

            # we need this to validate the size of the token_type embeddings
            # though currently we are passing all zeros to the token_type embeddings
            # "Sequence A" or "Sequence B"
            self.gguf_writer.add_token_type_count(self.hparams.get("type_vocab_size", 1))
        else:
            return super().set_vocab()

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # if name starts with "roberta.", remove the prefix
        # e.g. https://huggingface.co/BAAI/bge-reranker-v2-m3/tree/main
        if name.startswith("roberta."):
            name = name[8:]

        # position embeddings start at pad_token_id + 1, so just chop down the weight tensor
        if name == "embeddings.position_embeddings.weight":
            if self._position_offset is not None:
                data_torch = data_torch[self._position_offset:, :]

        return super().modify_tensors(data_torch, name, bid)


@ModelBase.register("NomicBertModel")
class NomicBertModel(BertModel):
    model_arch = gguf.MODEL_ARCH.BERT

    def __init__(self, dir_model: Path, ftype: gguf.LlamaFileType, fname_out: Path, **kwargs: Any):
        hparams = kwargs.pop("hparams", None)
        if hparams is None:
            hparams = ModelBase.load_hparams(dir_model)

        self.is_moe = bool(hparams.get("moe_every_n_layers"))
        self.model_arch = gguf.MODEL_ARCH.NOMIC_BERT_MOE if self.is_moe else gguf.MODEL_ARCH.NOMIC_BERT

        super().__init__(dir_model, ftype, fname_out, hparams=hparams, **kwargs)

        self._tokenizer_is_xlmroberta = self._is_tokenizer_xlmroberta()
        if self._tokenizer_is_xlmroberta:
            self._xlmroberta_tokenizer_init()

        npos, mtp = self.hparams["n_positions"], self.hparams.get("max_trained_positions", 2048)
        if npos == 8192 and mtp == 2048:
            self.hparams["n_positions"] = 2048  # nomic-embed-text v1 and v1.5 are trained for 2048 tokens.
        elif npos == 2048 and mtp == 2048:
            self.hparams["n_positions"] = 512  # nomic-embed-text-v2-moe is trained for 512 tokens.
        else:
            raise ValueError(f"unrecognized parameters: n_positions={npos}, max_trained_positions={mtp}")

        assert self.hparams["activation_function"] == ("gelu" if self.is_moe else "swiglu")

        # this doesn't do anything in the HF version
        assert self.hparams["causal"] is False
        # no bias tensors unless MoE
        assert self.hparams["qkv_proj_bias"] == self.is_moe
        assert self.hparams["mlp_fc1_bias"] == self.is_moe
        assert self.hparams["mlp_fc2_bias"] == self.is_moe

        # norm at end of layer
        assert self.hparams["prenorm"] is False
        # standard RoPE
        assert self.hparams["rotary_emb_fraction"] == 1.0
        assert self.hparams["rotary_emb_interleaved"] is False
        assert self.hparams["rotary_emb_scale_base"] is None

    def set_vocab(self) -> None:
        if self._tokenizer_is_xlmroberta:
            return self._xlmroberta_set_vocab()
        return super().set_vocab()

    def modify_tensors(self, data_torch: torch.Tensor, name: str, bid: int | None) -> Iterable[tuple[str, torch.Tensor]]:
        # the experts bias tensors are not used; skip them
        if "mlp.experts.bias" in name:
            return []

        if "mlp.experts.mlp.w1" in name:
            data_torch = data_torch.view(self.hparams["num_experts"], self.hparams["n_inner"], self.hparams["n_embd"])
            name += ".weight"

        if "mlp.experts.mlp.w2" in name:
            data_torch = data_torch.view(self.hparams["num_experts"], self.hparams["n_inner"], self.hparams["n_embd"])
            data_torch = data_torch.transpose(1, 2)
            name += ".weight"

        return [(self.map_tensor_name(name), data_torch)]

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_rope_freq_base(self.hparams["rotary_emb_base"])
        if self.is_moe:
            self.gguf_writer.add_moe_every_n_layers(self.hparams["moe_every_n_layers"])
            self.gguf_writer.add_expert_count(self.hparams["num_experts"])
            self.gguf_writer.add_expert_used_count(self.hparams["moe_top_k"])

    def _is_tokenizer_xlmroberta(self) -> bool:
        with open(self.dir_model / "tokenizer.json") as f:
            tokenizer_json = json.load(f)
        toktyp = tokenizer_json["model"]["type"]
        if toktyp == "Unigram":
            return True
        if toktyp == "WordPiece":
            return False
        raise ValueError(f"unknown tokenizer: {toktyp}")


@ModelBase.register("NeoBERT", "NeoBERTLMHead", "NeoBERTForSequenceClassification")
class NeoBert(BertModel):
    model_arch = gguf.MODEL_ARCH.NEO_BERT

    def set_gguf_parameters(self):
        super().set_gguf_parameters()

        # NeoBERT uses 2/3 of the intermediate size as feed forward length
        self.gguf_writer.add_feed_forward_length(int(2 * self.hparams["intermediate_size"] / 3))
        self.gguf_writer.add_rope_freq_base(10000.0)  # default value for NeoBERT
        self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)

        f_rms_eps = self.hparams.get("norm_eps", 1e-6)  # default value for NeoBERT
        self.gguf_writer.add_layer_norm_rms_eps(f_rms_eps)
        logger.info(f"gguf: rms norm epsilon = {f_rms_eps}")

        self.gguf_writer.add_pooling_type(gguf.PoolingType.CLS)  # https://huggingface.co/chandar-lab/NeoBERT#how-to-use

    def modify_tensors(self, data_torch, name, bid):
        if name.startswith("decoder."):
            return []

        if name.startswith("model."):
            name = name[6:]

        return super().modify_tensors(data_torch, name, bid)


@ModelBase.register("XLMRobertaModel", "XLMRobertaForSequenceClassification")
class XLMRobertaModel(BertModel):
    model_arch = gguf.MODEL_ARCH.BERT

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._xlmroberta_tokenizer_init()

    def set_vocab(self):
        self._xlmroberta_set_vocab()

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # if name starts with "roberta.", remove the prefix
        # e.g. https://huggingface.co/BAAI/bge-reranker-v2-m3/tree/main
        if name.startswith("roberta."):
            name = name[8:]

        # position embeddings start at pad_token_id + 1, so just chop down the weight tensor
        if name == "embeddings.position_embeddings.weight":
            if self._position_offset is not None:
                data_torch = data_torch[self._position_offset:, :]

        return super().modify_tensors(data_torch, name, bid)


@ModelBase.register("GemmaForCausalLM")
class GemmaModel(TextModel):
    model_arch = gguf.MODEL_ARCH.GEMMA

    def set_vocab(self):
        self._set_vocab_sentencepiece()

        # TODO: these special tokens should be exported only for the CodeGemma family
        special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=False,
                                          special_token_types=['prefix', 'suffix', 'middle', 'fsep', 'eot'])
        special_vocab._set_special_token("prefix", 67)
        special_vocab._set_special_token("suffix", 69)
        special_vocab._set_special_token("middle", 68)
        special_vocab._set_special_token("fsep", 70)
        special_vocab._set_special_token("eot", 107)
        special_vocab.chat_template = None  # do not add it twice
        special_vocab.add_to_gguf(self.gguf_writer)
        self.gguf_writer.add_add_space_prefix(False)

    def set_gguf_parameters(self):
        hparams = self.hparams
        block_count = hparams["num_hidden_layers"]

        self.gguf_writer.add_context_length(hparams["max_position_embeddings"])
        self.gguf_writer.add_embedding_length(hparams["hidden_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
        self.gguf_writer.add_head_count(hparams["num_attention_heads"])
        self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"] if "num_key_value_heads" in hparams else hparams["num_attention_heads"])
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
        self.gguf_writer.add_key_length(hparams["head_dim"])
        self.gguf_writer.add_value_length(hparams["head_dim"])
        self.gguf_writer.add_file_type(self.ftype)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        # lm_head is not used in llama.cpp, while autoawq will include this tensor in model
        # To prevent errors, skip loading lm_head.weight.
        if name == "lm_head.weight":
            logger.debug(f"Skipping get tensor {name!r} in safetensors so that convert can end normally.")
            return []

        # ref: https://github.com/huggingface/transformers/blob/fc37f38915372c15992b540dfcbbe00a916d4fc6/src/transformers/models/gemma/modeling_gemma.py#L89
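        # HF stores the RMSNorm weight as (w - 1); adding 1 yields the plain
        # multiplier that llama.cpp expects (see `output * (1.0 + weight)` in the ref)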
        if name.endswith("norm.weight"):
            data_torch = data_torch + 1

        return [(self.map_tensor_name(name), data_torch)]


@ModelBase.register("Gemma2ForCausalLM")
class Gemma2Model(TextModel):
    model_arch = gguf.MODEL_ARCH.GEMMA2

    def set_vocab(self):
        self._set_vocab_sentencepiece()
        self.gguf_writer.add_add_space_prefix(False)

    def set_gguf_parameters(self):
        hparams = self.hparams
        block_count = hparams["num_hidden_layers"]

        self.gguf_writer.add_context_length(hparams["max_position_embeddings"])
        self.gguf_writer.add_embedding_length(hparams["hidden_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
        self.gguf_writer.add_head_count(hparams["num_attention_heads"])
        self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"] if "num_key_value_heads" in hparams else hparams["num_attention_heads"])
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
        self.gguf_writer.add_key_length(hparams["head_dim"])
        self.gguf_writer.add_value_length(hparams["head_dim"])
        self.gguf_writer.add_file_type(self.ftype)
        self.gguf_writer.add_attn_logit_softcapping(self.hparams["attn_logit_softcapping"])
        self.gguf_writer.add_final_logit_softcapping(self.hparams["final_logit_softcapping"])
        self.gguf_writer.add_sliding_window(self.hparams["sliding_window"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        # lm_head is not used in llama.cpp, while autoawq will include this tensor in model
        # To prevent errors, skip loading lm_head.weight.
        if name == "lm_head.weight":
            logger.debug(f"Skipping get tensor {name!r} in safetensors so that convert can end normally.")
            return []

        # ref: https://github.com/huggingface/transformers/blob/fc37f38915372c15992b540dfcbbe00a916d4fc6/src/transformers/models/gemma/modeling_gemma.py#L89
        if name.endswith("norm.weight"):
            data_torch = data_torch + 1

        return [(self.map_tensor_name(name), data_torch)]


@ModelBase.register("Gemma3ForCausalLM", "Gemma3ForConditionalGeneration")
class Gemma3Model(TextModel):
    model_arch = gguf.MODEL_ARCH.GEMMA3

    def set_vocab(self):
        self._set_vocab_sentencepiece()
        self.gguf_writer.add_add_space_prefix(False)

    def set_gguf_parameters(self):
        hparams = self.hparams
        block_count = hparams["num_hidden_layers"]

        # some default values are not specified in the hparams
        self.gguf_writer.add_context_length(hparams.get("max_position_embeddings", 131072))
        self.gguf_writer.add_embedding_length(hparams["hidden_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
        self.gguf_writer.add_head_count(hparams.get("num_attention_heads", 8))
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams.get("rms_norm_eps", 1e-6))
        self.gguf_writer.add_key_length(hparams.get("head_dim", 256))
        self.gguf_writer.add_value_length(hparams.get("head_dim", 256))
        self.gguf_writer.add_file_type(self.ftype)
        self.gguf_writer.add_rope_freq_base(hparams.get("rope_theta", 1_000_000.0))  # for global layers
        # both attn_logit_softcapping and final_logit_softcapping are removed in Gemma3
        assert hparams.get("attn_logit_softcapping") is None
        assert hparams.get("final_logit_softcapping") is None
        self.gguf_writer.add_sliding_window(hparams["sliding_window"])
        self.gguf_writer.add_head_count_kv(hparams.get("num_key_value_heads", 4))
        if hparams.get("rope_scaling") is not None:
            assert hparams["rope_scaling"]["rope_type"] == "linear"
            # important: this rope_scaling is only applied for global layers, and not used by 1B model
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
            self.gguf_writer.add_rope_scaling_factor(hparams["rope_scaling"]["factor"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        if name.startswith("language_model."):
            name = name.replace("language_model.", "")
        elif name.startswith("multi_modal_projector.") or name.startswith("vision_tower.") \
                or name.startswith("multimodal_projector.") or name.startswith("vision_model."):
            return []  # skip vision tensors

        # remove OOV (out-of-vocabulary) rows in token_embd
        if "embed_tokens.weight" in name:
            vocab = self._create_vocab_sentencepiece()
            tokens = vocab[0]
            data_torch = data_torch[:len(tokens)]

        # ref code in Gemma3RMSNorm
        # output = output * (1.0 + self.weight.float())
        if name.endswith("norm.weight"):
            data_torch = data_torch + 1

        return [(self.map_tensor_name(name), data_torch)]


@ModelBase.register("Gemma3ForConditionalGeneration")
class Gemma3VisionModel(MmprojModel):
    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        hparams = self.hparams
        self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.GEMMA3)
        # default values below are taken from HF transformers code
        self.gguf_writer.add_vision_attention_layernorm_eps(hparams.get("layer_norm_eps", 1e-6))
        self.gguf_writer.add_vision_use_gelu(True)

        # calculate proj_scale_factor (used by tinygemma3 test model)
        image_seq_length = self.preprocessor_config.get("image_seq_length", 256)
        n_per_side = int(image_seq_length ** 0.5)
        image_size = self.hparams["image_size"]
        patch_size = self.hparams["patch_size"]
        proj_scale_factor = (image_size // patch_size) // n_per_side
        if proj_scale_factor > 0 and proj_scale_factor != 4:
            # we only need to write this if it's not the default value
            # in this case, we are converting a test model
            self.gguf_writer.add_vision_projector_scale_factor(proj_scale_factor)

    def tensor_force_quant(self, name, new_name, bid, n_dims):
        del bid, new_name, n_dims  # unused
        # related to https://github.com/ggml-org/llama.cpp/issues/13025
        if "input_projection" in name:
            return gguf.GGMLQuantizationType.F16
        if ".embeddings." in name:
            return gguf.GGMLQuantizationType.F32
        return False

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        if "vision_model.head." in name:
            return []  # skip redundant tensors for tinygemma3

        if name.startswith("multi_modal_projector.") or name.startswith("vision_tower.") \
                or name.startswith("multimodal_projector.") or name.startswith("vision_model."):
            # process vision tensors
            name = name.replace("_weight", ".weight")

            # correct norm value; only this "soft_emb_norm" needs to be corrected as it's part of the Gemma projector
            # the other norm values are part of the SigLIP model, and they are already correct
            # ref code: Gemma3RMSNorm
            if "soft_emb_norm.weight" in name:
                logger.info(f"Correcting norm value for '{name}'")
                data_torch = data_torch + 1

            return [(self.map_tensor_name(name), data_torch)]

        return []  # skip other tensors


@ModelBase.register("Starcoder2ForCausalLM")
class StarCoder2Model(TextModel):
    model_arch = gguf.MODEL_ARCH.STARCODER2


@ModelBase.register("Rwkv6ForCausalLM")
class Rwkv6Model(TextModel):
    model_arch = gguf.MODEL_ARCH.RWKV6

    def set_vocab(self):
        self._set_vocab_rwkv_world()

    def set_gguf_parameters(self):
        block_count = self.hparams["num_hidden_layers"]
        head_size = self.hparams["head_size"]
        hidden_size = self.hparams["hidden_size"]
        layer_norm_eps = self.hparams["layer_norm_epsilon"]
        rescale_every_n_layers = self.hparams["rescale_every"]
        intermediate_size = self.hparams["intermediate_size"] if self.hparams["intermediate_size"] is not None else int((hidden_size * 3.5) // 32 * 32)
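        # the fallback ffn size above is 3.5x the hidden size, rounded down to
        # a multiple of 32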
        time_mix_extra_dim = 64 if hidden_size == 4096 else 32
        time_decay_extra_dim = 128 if hidden_size == 4096 else 64

        # RWKV isn't context limited
        self.gguf_writer.add_context_length(1048576)
        self.gguf_writer.add_embedding_length(hidden_size)
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_layer_norm_eps(layer_norm_eps)
        self.gguf_writer.add_rescale_every_n_layers(rescale_every_n_layers)
        self.gguf_writer.add_wkv_head_size(head_size)
        self.gguf_writer.add_time_mix_extra_dim(time_mix_extra_dim)
        self.gguf_writer.add_time_decay_extra_dim(time_decay_extra_dim)
        self.gguf_writer.add_feed_forward_length(intermediate_size)
        self.gguf_writer.add_file_type(self.ftype)

        # required by llama.cpp, unused
        self.gguf_writer.add_head_count(0)
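
    # per-layer buffer used to collect the five time_mix_lerp_{w,k,v,r,g}
    # tensors so they can be fused into a single tensor in modify_tensors()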
    lerp_weights: dict[int, dict[str, Tensor]] = {}

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        new_name = self.map_tensor_name(name)

        if not (new_name.endswith(".weight") or new_name.endswith(".bias")):
            new_name += ".weight"

        if new_name.endswith("time_mix_w1.weight") or new_name.endswith("time_mix_decay_w1.weight") or new_name.endswith("time_mix_decay_w2.weight"):
            data_torch = data_torch.transpose(0, 1)

        if new_name.endswith("time_mix_w2.weight"):
            data_torch = data_torch.permute(0, 2, 1)

        if new_name.endswith("time_mix_decay.weight") or "lerp" in new_name:
            data_torch = data_torch.squeeze()

        try:
            rescale_every_n_layers = self.hparams["rescale_every"]
            if rescale_every_n_layers > 0:
                if new_name.endswith("time_mix_output.weight") or new_name.endswith("channel_mix_value.weight"):
                    data_torch = data_torch.div_(2 ** int(bid // rescale_every_n_layers))
        except KeyError:
            pass

        # concat time_mix_lerp weights to reduce some cpu overhead
        # also reduces the number of tensors in the model
        if bid is not None and "time_mix_lerp" in new_name and "time_mix_lerp_x" not in new_name:
            try:
                self.lerp_weights[bid][new_name] = data_torch
            except KeyError:
                self.lerp_weights[bid] = {new_name: data_torch}
            if all(f"blk.{bid}.time_mix_lerp_{i}.weight" in self.lerp_weights[bid].keys() for i in ["w", "k", "v", "r", "g"]):
                new_name = f"blk.{bid}.time_mix_lerp_fused.weight"
                data = torch.stack([self.lerp_weights[bid][f"blk.{bid}.time_mix_lerp_{i}.weight"].unsqueeze(0) for i in ["w", "k", "v", "r", "g"]], dim=0).unsqueeze(1)
                yield (new_name, data)
            return

        yield (new_name, data_torch)


@ModelBase.register("RWKV6Qwen2ForCausalLM")
class RWKV6Qwen2Model(Rwkv6Model):
    model_arch = gguf.MODEL_ARCH.RWKV6QWEN2

    def set_vocab(self):
        try:
            self._set_vocab_sentencepiece()
        except FileNotFoundError:
            self._set_vocab_gpt2()

    def set_gguf_parameters(self):
        block_count = self.hparams["num_hidden_layers"]
        num_attention_heads = self.hparams["num_attention_heads"]
        num_key_value_heads = self.hparams["num_key_value_heads"]
        hidden_size = self.hparams["hidden_size"]
        head_size = hidden_size // num_attention_heads
        rms_norm_eps = self.hparams["rms_norm_eps"]
        intermediate_size = self.hparams["intermediate_size"]
        time_mix_extra_dim = self.hparams.get("lora_rank_tokenshift", 64 if hidden_size >= 4096 else 32)
        time_decay_extra_dim = self.hparams.get("lora_rank_decay", 128 if hidden_size >= 4096 else 64)

        # RWKV isn't context limited
        self.gguf_writer.add_context_length(1048576)
        self.gguf_writer.add_embedding_length(hidden_size)
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_wkv_head_size(head_size)
        self.gguf_writer.add_time_mix_extra_dim(time_mix_extra_dim)
        self.gguf_writer.add_time_decay_extra_dim(time_decay_extra_dim)
        self.gguf_writer.add_feed_forward_length(intermediate_size)
        self.gguf_writer.add_file_type(self.ftype)

        # special parameters for time_mixing in RWKV6QWEN2
        self.gguf_writer.add_layer_norm_rms_eps(rms_norm_eps)
        self.gguf_writer.add_token_shift_count(1)
        # RWKV6QWEN2 uses grouped key/value like GQA
        self.gguf_writer.add_head_count_kv(num_key_value_heads)

        # required by llama.cpp, unused
        self.gguf_writer.add_head_count(0)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        for new_name, data in super().modify_tensors(data_torch, name, bid):
            if "time_mix_w1" in new_name or "time_mix_w2" in new_name:
                data = data.view(5, -1, data.shape[-1])
                # rwkv6qwen2 stores the lora components in rkvwg order instead of the
                # original wkvrg; permute them here to avoid code changes elsewhere
                data = torch.stack([data[3], data[1], data[2], data[0], data[4]], dim=0).view(-1, data.shape[-1])
                if "w2" in new_name:
                    data = data.view(5, -1, data.shape[-1])
                yield (new_name, data)
                continue
            yield (new_name, data)


@ModelBase.register("Rwkv7ForCausalLM", "RWKV7ForCausalLM")
class Rwkv7Model(TextModel):
    model_arch = gguf.MODEL_ARCH.RWKV7

    def set_vocab(self):
        self._set_vocab_rwkv_world()

    def calc_lora_rank(self, hidden_size, exponent, multiplier):
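        # round hidden_size**exponent * multiplier to the nearest multiple
        # of 32, with a minimum of 32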
        return max(1, round(hidden_size ** exponent * multiplier / 32)) * 32

    def set_gguf_parameters(self):
        block_count = self.hparams["num_hidden_layers"]
        try:
            head_size = self.hparams["head_size"]
            layer_norm_eps = self.hparams["layer_norm_epsilon"]
        except KeyError:
            head_size = self.hparams["head_dim"]
            layer_norm_eps = self.hparams["norm_eps"]
        hidden_size = self.hparams["hidden_size"]
        intermediate_size = self.hparams["intermediate_size"] if self.hparams["intermediate_size"] is not None else (hidden_size * 4)

        # ICLR: In-Context-Learning-Rate
        try:
            lora_rank_decay = self.hparams["lora_rank_decay"] if self.hparams["lora_rank_decay"] is not None else self.calc_lora_rank(hidden_size, 0.5, 1.8)
            lora_rank_iclr = self.hparams["lora_rank_iclr"] if self.hparams["lora_rank_iclr"] is not None else self.calc_lora_rank(hidden_size, 0.5, 1.8)
            lora_rank_value_residual_mix = self.hparams["lora_rank_value_residual_mix"] if self.hparams["lora_rank_value_residual_mix"] is not None else self.calc_lora_rank(hidden_size, 0.5, 1.3)
            lora_rank_gate = self.hparams["lora_rank_gate"] if self.hparams["lora_rank_gate"] is not None else self.calc_lora_rank(hidden_size, 0.8, 0.6)
        except KeyError:
            lora_rank_decay = self.hparams["decay_low_rank_dim"] if self.hparams["decay_low_rank_dim"] is not None else self.calc_lora_rank(hidden_size, 0.5, 1.8)
            lora_rank_iclr = self.hparams["a_low_rank_dim"] if self.hparams["a_low_rank_dim"] is not None else self.calc_lora_rank(hidden_size, 0.5, 1.8)
            lora_rank_value_residual_mix = self.hparams["v_low_rank_dim"] if self.hparams["v_low_rank_dim"] is not None else self.calc_lora_rank(hidden_size, 0.5, 1.3)
            lora_rank_gate = self.hparams["gate_low_rank_dim"] if self.hparams["gate_low_rank_dim"] is not None else self.calc_lora_rank(hidden_size, 0.8, 0.6)

        # RWKV isn't context limited
        self.gguf_writer.add_context_length(1048576)
        self.gguf_writer.add_embedding_length(hidden_size)
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_layer_norm_eps(layer_norm_eps)
        self.gguf_writer.add_wkv_head_size(head_size)
        self.gguf_writer.add_decay_lora_rank(lora_rank_decay)
        self.gguf_writer.add_iclr_lora_rank(lora_rank_iclr)
        self.gguf_writer.add_value_residual_mix_lora_rank(lora_rank_value_residual_mix)
        self.gguf_writer.add_gate_lora_rank(lora_rank_gate)
        self.gguf_writer.add_feed_forward_length(intermediate_size)
        self.gguf_writer.add_file_type(self.ftype)

        # required by llama.cpp, unused
        self.gguf_writer.add_head_count(0)

    lerp_weights: dict[int, dict[str, Tensor]] = {}
    lora_needs_transpose: bool = True

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # unify tensor names here to make life easier
        name = name.replace("blocks", "layers").replace("ffn", "feed_forward")
        name = name.replace("self_attn", "attention").replace("attn", "attention")
        name = name.replace("time_mixer.", "")
        # lora layer names in fla-hub's impl
        if "_lora.lora" in name:
            self.lora_needs_transpose = False
            name = name.replace("_lora.lora.0.weight", "1.weight")
            name = name.replace("_lora.lora.2.weight", "2.weight")
            name = name.replace("_lora.lora.2.bias", "0.weight")

        name = name.replace("feed_forward_norm", "ln2")
        name = name.replace("g_norm", "ln_x")

        if "attention.v" in name and "value" not in self.map_tensor_name(name) and bid == 0:
            # some models have dummy v0/v1/v2 on the first layer while others don't
            # ignore them all since they are not used
            return

        wkv_has_gate = self.hparams.get("wkv_has_gate", True)
        lerp_list = ["r", "w", "k", "v", "a", "g"] if wkv_has_gate else ["r", "w", "k", "v", "a"]

        if bid is not None and "attention.x_" in name:
            if "attention.x_x" in name:
                # already concatenated
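                # reshape to (n_lerp, 1, 1, hidden_size) so the fused lerp weights can
                # broadcast over sequence and batch dims (assumed from the RWKV7 graph layout)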
  3702. new_name = f"blk.{bid}.time_mix_lerp_fused.weight"
  3703. data = data_torch.reshape(len(lerp_list), 1, 1, -1)
  3704. yield (new_name, data)
  3705. else:
  3706. try:
  3707. self.lerp_weights[bid][name] = data_torch
  3708. except KeyError:
  3709. self.lerp_weights[bid] = {name: data_torch}
  3710. if all(f"model.layers.{bid}.attention.x_{i}" in self.lerp_weights[bid].keys() for i in lerp_list):
  3711. new_name = f"blk.{bid}.time_mix_lerp_fused.weight"
  3712. data = torch.stack([self.lerp_weights[bid][f"model.layers.{bid}.attention.x_{i}"] for i in lerp_list], dim=0)
  3713. yield (new_name, data)
  3714. return
        else:
            data_torch = data_torch.squeeze()
            new_name = self.map_tensor_name(name)
            if not (new_name.endswith(".weight") or new_name.endswith(".bias")):
                new_name += ".weight"

            if self.lora_needs_transpose and any(
                new_name.endswith(t) for t in [
                    "time_mix_w1.weight", "time_mix_w2.weight",
                    "time_mix_a1.weight", "time_mix_a2.weight",
                    "time_mix_v1.weight", "time_mix_v2.weight",
                    "time_mix_g1.weight", "time_mix_g2.weight",
                ]
            ):
                data_torch = data_torch.transpose(0, 1)

            if 'r_k' in new_name:
                data_torch = data_torch.flatten()

            if bid == 0 and "time_mix_a" in new_name:
                # duplicate a0/a1/a2 as dummy v0/v1/v2 on the first layer;
                # the easiest way to keep llama.cpp happy
                yield (new_name.replace("time_mix_a", "time_mix_v"), data_torch)

            yield (new_name, data_torch)
  3736. @ModelBase.register("RwkvHybridForCausalLM")
  3737. class ARwkv7Model(Rwkv7Model):
  3738. model_arch = gguf.MODEL_ARCH.ARWKV7
  3739. def set_vocab(self):
  3740. try:
  3741. self._set_vocab_sentencepiece()
  3742. except FileNotFoundError:
  3743. self._set_vocab_gpt2()
  3744. def set_gguf_parameters(self):
  3745. block_count = self.hparams["num_hidden_layers"]
  3746. hidden_size = self.hparams["hidden_size"]
  3747. head_size = self.hparams["head_size"]
  3748. rms_norm_eps = self.hparams["rms_norm_eps"]
  3749. intermediate_size = self.hparams["intermediate_size"]
  3750. wkv_has_gate = self.hparams["wkv_has_gate"]
  3751. assert self.hparams["wkv_version"] == 7
  3752. # ICLR: In-Context-Learning-Rate
  3753. lora_rank_decay = 64
  3754. lora_rank_iclr = 64
  3755. lora_rank_value_residual_mix = 32
  3756. lora_rank_gate = 128 if wkv_has_gate else 0
  3757. # RWKV isn't context limited
  3758. self.gguf_writer.add_context_length(1048576)
  3759. self.gguf_writer.add_embedding_length(hidden_size)
  3760. self.gguf_writer.add_block_count(block_count)
  3761. self.gguf_writer.add_layer_norm_rms_eps(rms_norm_eps)
  3762. self.gguf_writer.add_wkv_head_size(head_size)
  3763. self.gguf_writer.add_decay_lora_rank(lora_rank_decay)
  3764. self.gguf_writer.add_iclr_lora_rank(lora_rank_iclr)
  3765. self.gguf_writer.add_value_residual_mix_lora_rank(lora_rank_value_residual_mix)
  3766. self.gguf_writer.add_gate_lora_rank(lora_rank_gate)
  3767. self.gguf_writer.add_feed_forward_length(intermediate_size)
  3768. self.gguf_writer.add_file_type(self.ftype)
  3769. self.gguf_writer.add_token_shift_count(1)
  3770. # required by llama.cpp, unused
  3771. self.gguf_writer.add_head_count(0)
  3772. @ModelBase.register("MambaForCausalLM", "MambaLMHeadModel", "FalconMambaForCausalLM")
  3773. class MambaModel(TextModel):
  3774. model_arch = gguf.MODEL_ARCH.MAMBA
  3775. def set_vocab(self):
  3776. vocab_size = self.hparams["vocab_size"]
  3777. # Round vocab size to next multiple of 8
  3778. pad_vocab = self.hparams.get("pad_vocab_size_multiple", 8)
  3779. # pad using ceiling division
  3780. # ref: https://stackoverflow.com/a/17511341/22827863
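        # e.g. vocab_size=50277, pad_vocab=8: -(50277 // -8) * 8 = 6285 * 8 = 50280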
        vocab_size = -(vocab_size // -pad_vocab) * pad_vocab
        self.hparams["vocab_size"] = vocab_size

        if (self.dir_model / "tokenizer.json").is_file():
            self._set_vocab_gpt2()
        elif (self.dir_model / "tokenizer.model").is_file():
            self._set_vocab_sentencepiece()
        else:
            # Use the GPT-NeoX tokenizer when no tokenizer files are present
            self._set_vocab_builtin("gpt-neox", vocab_size)

    def set_gguf_parameters(self):
        d_model = self.find_hparam(["hidden_size", "d_model"])
        d_conv = self.find_hparam(["conv_kernel", "d_conv"], optional=True) or 4
        d_inner = self.find_hparam(["intermediate_size", "d_inner"], optional=True) or 2 * d_model
        d_state = self.find_hparam(["state_size", "d_state"], optional=True) or 16
        # ceiling division
        # ref: https://stackoverflow.com/a/17511341/22827863
        # ref: https://github.com/state-spaces/mamba/blob/ce59daea3a090d011d6476c6e5b97f6d58ddad8b/mamba_ssm/modules/mamba_simple.py#L58
        dt_rank = self.find_hparam(["time_step_rank", "dt_rank"], optional=True) or -(d_model // -16)
        rms_norm_eps = self.find_hparam(["layer_norm_epsilon", "rms_norm_eps"], optional=True) or 1e-5
        use_dt_b_c_norm = False
        # For FalconMamba we do apply RMS norm on the B, dt and C layers
        if self.find_hparam(["model_type"], optional=True) in ("falcon_mamba",):
            use_dt_b_c_norm = True
        # Fail early for models which don't have a block expansion factor of 2
        assert d_inner == 2 * d_model

        self.gguf_writer.add_context_length(2**20)  # arbitrary value; for those who use the default
        self.gguf_writer.add_embedding_length(d_model)
        self.gguf_writer.add_feed_forward_length(0)  # unused, but seemingly required when loading
        self.gguf_writer.add_head_count(0)  # unused, but seemingly required when loading
        self.gguf_writer.add_block_count(self.block_count)
        self.gguf_writer.add_ssm_conv_kernel(d_conv)
        self.gguf_writer.add_ssm_inner_size(d_inner)
        self.gguf_writer.add_ssm_state_size(d_state)
        self.gguf_writer.add_ssm_time_step_rank(dt_rank)
        self.gguf_writer.add_layer_norm_rms_eps(rms_norm_eps)
        self.gguf_writer.add_ssm_dt_b_c_rms(use_dt_b_c_norm)  # For classic Mamba we don't apply rms norm on B / DT layers
        self.gguf_writer.add_file_type(self.ftype)

    _tok_embd = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        output_name = self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT)
        tok_embd_name = self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD)

        new_name = self.map_tensor_name(name)

        if name.endswith(".A_log"):
            logger.debug("A_log --> A ==> " + new_name)
            data_torch = -torch.exp(data_torch)

        # [4 1 8192 1] -> [4 8192 1 1]
        if self.match_model_tensor_name(new_name, gguf.MODEL_TENSOR.SSM_CONV1D, bid):
            data_torch = data_torch.squeeze()

        # assuming token_embd.weight is seen before output.weight
        if self._tok_embd is not None and new_name == output_name:
            if torch.equal(self._tok_embd, data_torch):
                logger.debug(f"{output_name} is equivalent to {tok_embd_name}, omitting")
                return []
        elif new_name == tok_embd_name:
            self._tok_embd = data_torch

        return [(new_name, data_torch)]
  3837. @ModelBase.register("CohereForCausalLM")
  3838. class CommandR2Model(TextModel):
  3839. model_arch = gguf.MODEL_ARCH.COMMAND_R
  3840. def __init__(self, *args, **kwargs):
  3841. super().__init__(*args, **kwargs)
  3842. # max_position_embeddings = 8192 in config.json but model was actually
  3843. # trained on 128k context length
  3844. # aya-23 models don't have model_max_length specified
  3845. self.hparams["max_position_embeddings"] = self.find_hparam(["model_max_length", "max_position_embeddings"])
  3846. def set_gguf_parameters(self):
  3847. super().set_gguf_parameters()
  3848. self.gguf_writer.add_logit_scale(self.hparams["logit_scale"])
  3849. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)
  3850. @ModelBase.register("Cohere2ForCausalLM")
  3851. class Cohere2Model(TextModel):
  3852. model_arch = gguf.MODEL_ARCH.COHERE2
  3853. def set_gguf_parameters(self):
  3854. super().set_gguf_parameters()
  3855. self.gguf_writer.add_logit_scale(self.hparams["logit_scale"])
  3856. self.gguf_writer.add_sliding_window(self.hparams["sliding_window"])
  3857. self.gguf_writer.add_vocab_size(self.hparams["vocab_size"])
  3858. rotary_pct = self.hparams["rotary_pct"]
  3859. hidden_size = self.hparams["hidden_size"]
  3860. num_attention_heads = self.hparams["num_attention_heads"]
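        # hypothetical numbers: rotary_pct=0.5, hidden_size=4096, 32 heads -> head_dim=128 -> rope dim 64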
        self.gguf_writer.add_rope_dimension_count(int(rotary_pct * (hidden_size // num_attention_heads)))
        self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)
  3863. @ModelBase.register("OlmoForCausalLM")
  3864. @ModelBase.register("OLMoForCausalLM")
  3865. class OlmoModel(TextModel):
  3866. model_arch = gguf.MODEL_ARCH.OLMO
  3867. def set_gguf_parameters(self):
  3868. super().set_gguf_parameters()
  3869. self.gguf_writer.add_layer_norm_eps(1e-5)
  3870. clip_qkv = self.hparams.get("clip_qkv")
  3871. if clip_qkv is not None:
  3872. self.gguf_writer.add_clamp_kqv(clip_qkv)
  3873. # Same as super class, but permuting q_proj, k_proj
  3874. # Copied from: LlamaModel
  3875. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  3876. del bid # unused
  3877. n_head = self.hparams["num_attention_heads"]
  3878. n_kv_head = self.hparams.get("num_key_value_heads")
  3879. if name.endswith("q_proj.weight"):
  3880. data_torch = LlamaModel.permute(data_torch, n_head, n_head)
  3881. if name.endswith("k_proj.weight"):
  3882. data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)
  3883. return [(self.map_tensor_name(name), data_torch)]
  3884. @ModelBase.register("Olmo2ForCausalLM")
  3885. class Olmo2Model(TextModel):
  3886. model_arch = gguf.MODEL_ARCH.OLMO2
  3887. @ModelBase.register("OlmoeForCausalLM")
  3888. class OlmoeModel(TextModel):
  3889. model_arch = gguf.MODEL_ARCH.OLMOE
  3890. def set_gguf_parameters(self):
  3891. super().set_gguf_parameters()
  3892. self.gguf_writer.add_layer_norm_rms_eps(1e-5)
  3893. if (n_experts := self.hparams.get("num_experts")) is not None:
  3894. self.gguf_writer.add_expert_count(n_experts)
  3895. _experts: list[dict[str, Tensor]] | None = None
  3896. # Copied from: Qwen2MoeModel
    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # process the experts separately
        if name.find("experts") != -1:
            n_experts = self.hparams["num_experts"]
            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch
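            # each expert contributes down/gate/up projections, so the block is
            # complete once 3 * n_experts tensors have been collected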
            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for w_name in ["down_proj", "gate_proj", "up_proj"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)

                    merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"

                    new_name = self.map_tensor_name(merged_name)

                    tensors.append((new_name, data_torch))
                return tensors
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]

    # Copied from: Qwen2MoeModel
    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")
  3930. @ModelBase.register("JinaBertModel", "JinaBertForMaskedLM")
  3931. class JinaBertV2Model(BertModel):
  3932. model_arch = gguf.MODEL_ARCH.JINA_BERT_V2
  3933. def set_vocab(self):
  3934. tokenizer_class = 'BertTokenizer'
  3935. with open(self.dir_model / "tokenizer_config.json", "r", encoding="utf-8") as f:
  3936. tokenizer_class = json.load(f)['tokenizer_class']
  3937. if tokenizer_class == 'BertTokenizer':
  3938. super().set_vocab()
  3939. elif tokenizer_class == 'RobertaTokenizer':
  3940. self._set_vocab_gpt2()
  3941. self.gguf_writer.add_token_type_count(2)
  3942. else:
  3943. raise NotImplementedError(f'Tokenizer {tokenizer_class} is not supported for JinaBertModel')
  3944. self.gguf_writer.add_add_bos_token(True)
  3945. self.gguf_writer.add_add_eos_token(True)
  3946. @ModelBase.register("OpenELMForCausalLM")
  3947. class OpenELMModel(TextModel):
  3948. model_arch = gguf.MODEL_ARCH.OPENELM
  3949. @staticmethod
  3950. def _make_divisible(v: float | int, divisor: int) -> int:
  3951. # ref: https://huggingface.co/apple/OpenELM-270M-Instruct/blob/eb111ff2e6724348e5b905984063d4064d4bc579/configuration_openelm.py#L34-L38
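        # e.g. divisor=256, v=1000: int(1000 + 128) // 256 * 256 = 1024, and 1024 >= 0.9 * 1000, so keep it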
        new_v = max(divisor, int(v + divisor / 2) // divisor * divisor)
        # Make sure that round down does not go down by more than 10%.
        if new_v < 0.9 * v:
            new_v += divisor
        return new_v

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        ffn_multipliers: list[float] = self.hparams["ffn_multipliers"]
        ffn_dim_divisor: int = self.hparams["ffn_dim_divisor"]
        self._n_embd: int = self.hparams["model_dim"]
        self._num_kv_heads: list[int] = self.hparams["num_kv_heads"]
        self._num_query_heads: list[int] = self.hparams["num_query_heads"]
        self._ffn_dims: list[int] = [
            OpenELMModel._make_divisible(multiplier * self._n_embd, ffn_dim_divisor)
            for multiplier in ffn_multipliers
        ]
        assert isinstance(self._num_kv_heads, list) and isinstance(self._num_kv_heads[0], int)
        assert isinstance(self._num_query_heads, list) and isinstance(self._num_query_heads[0], int)

    # Uses the tokenizer from meta-llama/Llama-2-7b-hf
    def set_vocab(self):
        try:
            self._set_vocab_sentencepiece()
        except FileNotFoundError:
            self._set_vocab_builtin("llama-spm", self.hparams["vocab_size"])

    def set_gguf_parameters(self):
        n_embd = self._n_embd
        head_dim = self.hparams["head_dim"]
        rot_pct = 1.0
        assert self.block_count == len(self._num_kv_heads)
        assert self.block_count == len(self._num_query_heads)
        assert self.block_count == len(self._ffn_dims)

        self.gguf_writer.add_block_count(self.block_count)
        self.gguf_writer.add_context_length(self.hparams["max_context_length"])
        self.gguf_writer.add_embedding_length(n_embd)
        self.gguf_writer.add_feed_forward_length(self._ffn_dims)
        self.gguf_writer.add_head_count(self._num_query_heads)
        self.gguf_writer.add_head_count_kv(self._num_kv_heads)
        self.gguf_writer.add_rope_freq_base(self.hparams["rope_freq_constant"])
        # https://huggingface.co/apple/OpenELM-270M-Instruct/blob/c401df2/modeling_openelm.py#L30
        self.gguf_writer.add_layer_norm_rms_eps(1e-6)
        self.gguf_writer.add_rope_dimension_count(int(rot_pct * head_dim))
        self.gguf_writer.add_key_length(head_dim)
        self.gguf_writer.add_value_length(head_dim)
        self.gguf_writer.add_file_type(self.ftype)

    def find_hparam(self, keys: Iterable[str], optional: bool = False) -> Any:
        if "n_layers" in keys:
            return self.hparams["num_transformer_layers"]
        return super().find_hparam(keys, optional)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # OpenELM fuses gate and up projections into ffn.proj_1; split them back out,
        # with the first ff_dim rows being the gate and the rest the up projection
        if bid is not None and name == f"transformer.layers.{bid}.ffn.proj_1.weight":
            ff_dim = self._ffn_dims[bid]
            yield (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE, bid), data_torch[:ff_dim])
            yield (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP, bid), data_torch[ff_dim:])
            return

        yield (self.map_tensor_name(name), data_torch)
  4008. @ModelBase.register("ArcticForCausalLM")
  4009. class ArcticModel(TextModel):
  4010. model_arch = gguf.MODEL_ARCH.ARCTIC
  4011. def set_vocab(self):
  4012. # The reason for using a custom implementation here is that the
  4013. # snowflake-arctic-instruct model redefined tokens 31998 and 31999 from
  4014. # tokenizer.model and used them as BOS and EOS instead of adding new tokens.
  4015. from sentencepiece import SentencePieceProcessor
  4016. tokenizer_path = self.dir_model / 'tokenizer.model'
  4017. if not tokenizer_path.is_file():
  4018. logger.error(f'Error: Missing {tokenizer_path}')
  4019. sys.exit(1)
  4020. # Read the whole vocabulary from the tokenizer.model file
  4021. tokenizer = SentencePieceProcessor()
  4022. tokenizer.LoadFromFile(str(tokenizer_path))
  4023. vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())
  4024. tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
  4025. scores: list[float] = [-10000.0] * vocab_size
  4026. toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size
  4027. for token_id in range(tokenizer.vocab_size()):
  4028. piece = tokenizer.IdToPiece(token_id)
  4029. text = piece.encode("utf-8")
  4030. score = tokenizer.GetScore(token_id)
  4031. toktype = SentencePieceTokenTypes.NORMAL
  4032. if tokenizer.IsUnknown(token_id):
  4033. toktype = SentencePieceTokenTypes.UNKNOWN
  4034. elif tokenizer.IsControl(token_id):
  4035. toktype = SentencePieceTokenTypes.CONTROL
  4036. elif tokenizer.IsUnused(token_id):
  4037. toktype = SentencePieceTokenTypes.UNUSED
  4038. elif tokenizer.IsByte(token_id):
  4039. toktype = SentencePieceTokenTypes.BYTE
  4040. tokens[token_id] = text
  4041. scores[token_id] = score
  4042. toktypes[token_id] = toktype
        # Use the added_tokens_decoder field from tokenizer_config.json as the source
        # of information about added/redefined tokens and modify them accordingly.
        tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
        if tokenizer_config_file.is_file():
            with open(tokenizer_config_file, "r", encoding="utf-8") as f:
                tokenizer_config_json = json.load(f)

                if "added_tokens_decoder" in tokenizer_config_json:
                    added_tokens_decoder = tokenizer_config_json["added_tokens_decoder"]
                    for token_id, token_json in added_tokens_decoder.items():
                        token_id = int(token_id)
                        if token_id >= vocab_size:
                            logger.debug(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
                            continue

                        token_content = token_json["content"]
                        token_type = SentencePieceTokenTypes.USER_DEFINED
                        token_score = -10000.0

                        # Map unk_token to UNKNOWN, other special tokens to CONTROL
                        # Set the score to 0.0 as in the original tokenizer.model
                        if ("special" in token_json) and token_json["special"]:
                            if token_content == tokenizer_config_json["unk_token"]:
                                token_type = SentencePieceTokenTypes.UNKNOWN
                            else:
                                token_type = SentencePieceTokenTypes.CONTROL
                            token_score = 0.0

                        logger.info(f"Setting added token {token_id} to '{token_content}' (type: {token_type}, score: {token_score:.2f})")
                        tokens[token_id] = token_content.encode("utf-8")
                        toktypes[token_id] = token_type
                        scores[token_id] = token_score

        self.gguf_writer.add_tokenizer_model("llama")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        hparams = self.hparams
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])
        self.gguf_writer.add_rope_dimension_count(hparams["hidden_size"] // hparams["num_attention_heads"])

    _experts: list[dict[str, Tensor]] | None = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        n_head = self.hparams["num_attention_heads"]
        n_kv_head = self.hparams.get("num_key_value_heads")

        if name.endswith("q_proj.weight"):
            data_torch = LlamaModel.permute(data_torch, n_head, n_head)
        if name.endswith("k_proj.weight"):
            data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)

        # process the experts separately
        if name.find("block_sparse_moe.experts") != -1:
            n_experts = self.hparams["num_local_experts"]

            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for wid in ["w1", "w2", "w3"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.{wid}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)

                    merged_name = f"layers.{bid}.feed_forward.experts.{wid}.weight"

                    new_name = self.map_tensor_name(merged_name)

                    tensors.append((new_name, data_torch))
                return tensors
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]

            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")
  4122. @ModelBase.register("DeepseekForCausalLM")
  4123. class DeepseekModel(TextModel):
  4124. model_arch = gguf.MODEL_ARCH.DEEPSEEK
  4125. def set_vocab(self):
  4126. try:
  4127. self._set_vocab_sentencepiece()
  4128. except FileNotFoundError:
  4129. self._set_vocab_gpt2()
  4130. def set_gguf_parameters(self):
  4131. super().set_gguf_parameters()
  4132. hparams = self.hparams
  4133. if "head_dim" in hparams:
  4134. rope_dim = hparams["head_dim"]
  4135. else:
  4136. rope_dim = hparams["hidden_size"] // hparams["num_attention_heads"]
  4137. self.gguf_writer.add_rope_dimension_count(rope_dim)
  4138. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)
  4139. self.gguf_writer.add_leading_dense_block_count(hparams["first_k_dense_replace"])
  4140. self.gguf_writer.add_vocab_size(hparams["vocab_size"])
  4141. self.gguf_writer.add_expert_feed_forward_length(hparams["moe_intermediate_size"])
  4142. self.gguf_writer.add_expert_weights_scale(1.0)
  4143. self.gguf_writer.add_expert_count(hparams["n_routed_experts"])
  4144. self.gguf_writer.add_expert_shared_count(hparams["n_shared_experts"])

    _experts: list[dict[str, Tensor]] | None = None

    @staticmethod
    def permute(weights: Tensor, n_head: int, n_head_kv: int | None):
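        # reorders the rotary dims from HF's half-split (rotate_half) layout into the
        # interleaved even/odd pairs llama.cpp expects; same transform as LlamaModel.permute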
        if n_head_kv is not None and n_head != n_head_kv:
            n_head = n_head_kv
        return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
                .swapaxes(1, 2)
                .reshape(weights.shape))

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        n_head = self.hparams["num_attention_heads"]
        n_kv_head = self.hparams.get("num_key_value_heads")

        if name.endswith(("q_proj.weight", "q_proj.bias")):
            data_torch = DeepseekModel.permute(data_torch, n_head, n_head)
        if name.endswith(("k_proj.weight", "k_proj.bias")):
            data_torch = DeepseekModel.permute(data_torch, n_head, n_kv_head)

        # process the experts separately
        if name.find("mlp.experts") != -1:
            n_experts = self.hparams["n_routed_experts"]
            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for w_name in ["down_proj", "gate_proj", "up_proj"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)

                    merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"

                    new_name = self.map_tensor_name(merged_name)

                    tensors.append((new_name, data_torch))
                return tensors
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]

            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")
  4191. @ModelBase.register("DeepseekV2ForCausalLM")
  4192. @ModelBase.register("DeepseekV3ForCausalLM")
  4193. class DeepseekV2Model(TextModel):
  4194. model_arch = gguf.MODEL_ARCH.DEEPSEEK2
  4195. def set_vocab(self):
  4196. self._set_vocab_gpt2()
  4197. def set_gguf_parameters(self):
  4198. # note: deepseek2 using MLA converts into MQA (ie: GQA with 1 group)
  4199. self.hparams["num_key_value_heads"] = 1
  4200. super().set_gguf_parameters()
  4201. hparams = self.hparams
  4202. self.gguf_writer.add_leading_dense_block_count(hparams["first_k_dense_replace"])
  4203. self.gguf_writer.add_vocab_size(hparams["vocab_size"])
  4204. if "q_lora_rank" in hparams and hparams["q_lora_rank"] is not None:
  4205. self.gguf_writer.add_q_lora_rank(hparams["q_lora_rank"])
  4206. self.gguf_writer.add_kv_lora_rank(hparams["kv_lora_rank"])
  4207. # note: deepseek2 using MLA converts into MQA with larger heads, then decompresses to MHA
  4208. self.gguf_writer.add_key_length(hparams["kv_lora_rank"] + hparams["qk_rope_head_dim"])
  4209. self.gguf_writer.add_value_length(hparams["kv_lora_rank"])
  4210. self.gguf_writer.add_key_length_mla(hparams["qk_nope_head_dim"] + hparams["qk_rope_head_dim"])
  4211. self.gguf_writer.add_value_length_mla(hparams["v_head_dim"])
  4212. self.gguf_writer.add_expert_feed_forward_length(hparams["moe_intermediate_size"])
  4213. self.gguf_writer.add_expert_count(hparams["n_routed_experts"])
  4214. self.gguf_writer.add_expert_shared_count(hparams["n_shared_experts"])
  4215. self.gguf_writer.add_expert_weights_scale(hparams["routed_scaling_factor"])
  4216. self.gguf_writer.add_expert_weights_norm(hparams["norm_topk_prob"])
  4217. if hparams["scoring_func"] == "sigmoid":
  4218. self.gguf_writer.add_expert_gating_func(gguf.ExpertGatingFuncType.SIGMOID)
  4219. elif hparams["scoring_func"] == "softmax":
  4220. self.gguf_writer.add_expert_gating_func(gguf.ExpertGatingFuncType.SOFTMAX)
  4221. else:
  4222. raise ValueError(f"Unsupported scoring_func value: {hparams['scoring_func']}")
  4223. self.gguf_writer.add_rope_dimension_count(hparams["qk_rope_head_dim"])

        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
            self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"])
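            # DeepSeek's reference YaRN implementation scales attention by
            # mscale = 0.1 * mscale_all_dim * ln(factor) + 1.0, hence the 0.1 multiplier here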
            self.gguf_writer.add_rope_scaling_yarn_log_mul(0.1 * rope_scaling["mscale_all_dim"])

    _experts: list[dict[str, Tensor]] | None = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # rename e_score_correction_bias tensors
        if name.endswith("e_score_correction_bias"):
            name = name.replace("e_score_correction_bias", "e_score_correction.bias")

        # skip Multi-Token Prediction (MTP) layers
        block_count = self.hparams["num_hidden_layers"]
        match = re.match(r"model.layers.(\d+)", name)
        if match and int(match.group(1)) >= block_count:
            return []

        # process the experts separately
        if name.find("mlp.experts") != -1:
            n_experts = self.hparams["n_routed_experts"]
            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for w_name in ["down_proj", "gate_proj", "up_proj"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)

                    merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"

                    new_name = self.map_tensor_name(merged_name)

                    tensors.append((new_name, data_torch))
                return tensors
            else:
                return []

        # note: MLA with the absorption optimization requires kv_b_proj to be split
        # into k_b_proj and v_b_proj, with k_b_proj transposed
        if name.endswith("kv_b_proj.weight"):
            name_kb = name.replace("kv_b_proj", "k_b_proj")
            name_vb = name.replace("kv_b_proj", "v_b_proj")

            n_head_kv = self.hparams["num_key_value_heads"]
            v_head_dim = self.hparams["v_head_dim"]
            qk_nope_head_dim = self.hparams["qk_nope_head_dim"]

            assert data_torch.shape[0] == n_head_kv * (v_head_dim + qk_nope_head_dim)

            kv_b = data_torch.view(n_head_kv, v_head_dim + qk_nope_head_dim, data_torch.shape[-1])
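            # kv_b has shape (n_head_kv, qk_nope_head_dim + v_head_dim, kv_lora_rank):
            # the first qk_nope_head_dim rows of dim 1 form k_b, the rest form v_b;
            # k_b is transposed so the absorption optimization can fold it into the query side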
            k_b, v_b = torch.split(kv_b, [qk_nope_head_dim, v_head_dim], dim=1)
            k_b = k_b.transpose(1, 2)

            return [
                (self.map_tensor_name(name_kb), k_b),
                (self.map_tensor_name(name_vb), v_b)
            ]

        return [(self.map_tensor_name(name), data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]

            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")
  4286. @ModelBase.register("Dots1ForCausalLM")
  4287. class Dots1Model(Qwen2MoeModel):
  4288. model_arch = gguf.MODEL_ARCH.DOTS1
  4289. def __init__(self, *args, **kwargs):
  4290. super().__init__(*args, **kwargs)
  4291. self.hparams["num_experts"] = self.hparams["n_routed_experts"]
  4292. def set_gguf_parameters(self):
  4293. super().set_gguf_parameters()
  4294. self.gguf_writer.add_leading_dense_block_count(self.hparams["first_k_dense_replace"])
  4295. self.gguf_writer.add_expert_shared_count(self.hparams["n_shared_experts"])
  4296. self.gguf_writer.add_expert_weights_scale(self.hparams["routed_scaling_factor"])
  4297. self.gguf_writer.add_expert_weights_norm(self.hparams["norm_topk_prob"])
  4298. if self.hparams["scoring_func"] == "noaux_tc":
  4299. self.gguf_writer.add_expert_gating_func(gguf.ExpertGatingFuncType.SIGMOID)
  4300. else:
  4301. raise ValueError(f"Unsupported scoring_func value: {self.hparams['scoring_func']}")
  4302. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None):
  4303. if name.endswith("e_score_correction_bias"):
  4304. name = name.replace("e_score_correction_bias", "e_score_correction.bias")
  4305. if "shared_experts" in name:
  4306. return [(self.map_tensor_name(name), data_torch)]
  4307. return super().modify_tensors(data_torch, name, bid)
  4308. @ModelBase.register("PLMForCausalLM")
  4309. class PLMModel(TextModel):
  4310. model_arch = gguf.MODEL_ARCH.PLM
  4311. def set_vocab(self):
  4312. self._set_vocab_gpt2()
  4313. def set_gguf_parameters(self):
  4314. super().set_gguf_parameters()
  4315. hparams = self.hparams
  4316. self.gguf_writer.add_vocab_size(hparams["vocab_size"])
  4317. self.gguf_writer.add_kv_lora_rank(hparams["kv_lora_rank"])
  4318. self.gguf_writer.add_key_length(hparams["qk_nope_head_dim"] + hparams["qk_rope_head_dim"])
  4319. self.gguf_writer.add_value_length(hparams["v_head_dim"])
  4320. self.gguf_writer.add_rope_dimension_count(hparams["qk_rope_head_dim"])
  4321. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  4322. return [(self.map_tensor_name(name), data_torch)]
  4323. def prepare_tensors(self):
  4324. super().prepare_tensors()
  4325. @ModelBase.register("T5WithLMHeadModel")
  4326. @ModelBase.register("T5ForConditionalGeneration")
  4327. @ModelBase.register("MT5ForConditionalGeneration")
  4328. @ModelBase.register("UMT5ForConditionalGeneration")
  4329. class T5Model(TextModel):
  4330. model_arch = gguf.MODEL_ARCH.T5
  4331. def __init__(self, *args, **kwargs):
  4332. super().__init__(*args, **kwargs)
  4333. self.shared_token_embeddings_found = False
  4334. def set_vocab(self):
  4335. # to avoid TypeError: Descriptors cannot be created directly
  4336. # exception when importing sentencepiece_model_pb2
  4337. os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
  4338. from sentencepiece import SentencePieceProcessor
  4339. from sentencepiece import sentencepiece_model_pb2 as model
  4340. tokenizer_path = self.dir_model / 'tokenizer.model'
  4341. # many older models use spiece.model tokenizer model filename
  4342. if not tokenizer_path.is_file():
  4343. tokenizer_path = self.dir_model / 'spiece.model'
  4344. if not tokenizer_path.is_file():
  4345. raise FileNotFoundError(f"File not found: {tokenizer_path}")
  4346. sentencepiece_model = model.ModelProto() # pyright: ignore[reportAttributeAccessIssue]
  4347. sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read())

        # some models like Pile-T5 family use BPE tokenizer instead of Unigram
        if sentencepiece_model.trainer_spec.model_type == 2:  # BPE
            # ensure the tokenizer model file name is correct
            assert tokenizer_path.name == 'tokenizer.model'
            return self._set_vocab_sentencepiece()
        else:
            assert sentencepiece_model.trainer_spec.model_type == 1  # UNIGRAM

        add_prefix = sentencepiece_model.normalizer_spec.add_dummy_prefix
        remove_whitespaces = sentencepiece_model.normalizer_spec.remove_extra_whitespaces
        precompiled_charsmap = sentencepiece_model.normalizer_spec.precompiled_charsmap

        tokenizer = SentencePieceProcessor()
        tokenizer.LoadFromFile(str(tokenizer_path))

        vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())

        tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
        scores: list[float] = [-10000.0] * vocab_size
        toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size

        for token_id in range(tokenizer.vocab_size()):
            piece = tokenizer.IdToPiece(token_id)
            text = piece.encode("utf-8")
            score = tokenizer.GetScore(token_id)

            toktype = SentencePieceTokenTypes.NORMAL
            if tokenizer.IsUnknown(token_id):
                toktype = SentencePieceTokenTypes.UNKNOWN
            elif tokenizer.IsControl(token_id):
                toktype = SentencePieceTokenTypes.CONTROL
            elif tokenizer.IsUnused(token_id):
                toktype = SentencePieceTokenTypes.UNUSED
            elif tokenizer.IsByte(token_id):
                toktype = SentencePieceTokenTypes.BYTE

            tokens[token_id] = text
            scores[token_id] = score
            toktypes[token_id] = toktype

        added_tokens_file = self.dir_model / 'added_tokens.json'
        if added_tokens_file.is_file():
            with open(added_tokens_file, "r", encoding="utf-8") as f:
                added_tokens_json = json.load(f)
                for key in added_tokens_json:
                    token_id = added_tokens_json[key]
                    if token_id >= vocab_size:
                        logger.warning(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
                        continue

                    tokens[token_id] = key.encode("utf-8")
                    scores[token_id] = -1000.0
                    toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED

        if vocab_size > len(tokens):
            pad_count = vocab_size - len(tokens)
            logger.debug(f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]")
            for i in range(1, pad_count + 1):
                tokens.append(bytes(f"[PAD{i}]", encoding="utf-8"))
                scores.append(-1000.0)
                toktypes.append(SentencePieceTokenTypes.UNUSED)

        self.gguf_writer.add_tokenizer_model("t5")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)
        self.gguf_writer.add_add_space_prefix(add_prefix)
        self.gguf_writer.add_remove_extra_whitespaces(remove_whitespaces)
        if precompiled_charsmap:
            self.gguf_writer.add_precompiled_charsmap(precompiled_charsmap)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

        self.gguf_writer.add_add_bos_token(False)
        self.gguf_writer.add_add_eos_token(True)

    def set_gguf_parameters(self):
        if (n_ctx := self.find_hparam(["n_positions"], optional=True)) is None:
            logger.warning("Couldn't find context length in config.json, assuming default value of 512")
            n_ctx = 512
        self.gguf_writer.add_context_length(n_ctx)
        self.gguf_writer.add_embedding_length(self.hparams["d_model"])
        self.gguf_writer.add_feed_forward_length(self.hparams["d_ff"])
        self.gguf_writer.add_block_count(self.hparams["num_layers"])
        self.gguf_writer.add_head_count(self.hparams["num_heads"])
        self.gguf_writer.add_key_length(self.hparams["d_kv"])
        self.gguf_writer.add_value_length(self.hparams["d_kv"])
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_relative_attn_buckets_count(self.hparams["relative_attention_num_buckets"])
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_decoder_start_token_id(self.hparams["decoder_start_token_id"])
        self.gguf_writer.add_file_type(self.ftype)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        # T5-based models store the shared token embeddings under any of the names
        # "encoder.embed_tokens.weight", "decoder.embed_tokens.weight" or "shared.weight",
        # and some checkpoints even contain several copies. We use the first of these
        # tensors as the token embeddings for both encoder and decoder and ignore the rest.
        if name in ["decoder.embed_tokens.weight", "encoder.embed_tokens.weight", "shared.weight"]:
            if not self.shared_token_embeddings_found:
                name = "shared.weight"
                self.shared_token_embeddings_found = True
            else:
                logger.debug(f"Skipping shared tensor {name!r} in safetensors so that convert can end normally.")
                return []

        return [(self.map_tensor_name(name), data_torch)]
  4442. @ModelBase.register("T5EncoderModel")
  4443. class T5EncoderModel(TextModel):
  4444. model_arch = gguf.MODEL_ARCH.T5ENCODER
  4445. def __init__(self, *args, **kwargs):
  4446. super().__init__(*args, **kwargs)
  4447. self.shared_token_embeddings_found = False
  4448. def set_vocab(self):
  4449. # to avoid TypeError: Descriptors cannot be created directly
  4450. # exception when importing sentencepiece_model_pb2
  4451. os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
  4452. from sentencepiece import SentencePieceProcessor
  4453. from sentencepiece import sentencepiece_model_pb2 as model
  4454. tokenizer_path = self.dir_model / 'tokenizer.model'
  4455. # many older models use spiece.model tokenizer model filename
  4456. if not tokenizer_path.is_file():
  4457. tokenizer_path = self.dir_model / 'spiece.model'
  4458. if not tokenizer_path.is_file():
  4459. raise FileNotFoundError(f"File not found: {tokenizer_path}")
  4460. sentencepiece_model = model.ModelProto() # pyright: ignore[reportAttributeAccessIssue]
  4461. sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read())

        # some models like Pile-T5 family use BPE tokenizer instead of Unigram
        if sentencepiece_model.trainer_spec.model_type == 2:  # BPE
            # ensure the tokenizer model file name is correct
            assert tokenizer_path.name == 'tokenizer.model'
            return self._set_vocab_sentencepiece()
        else:
            assert sentencepiece_model.trainer_spec.model_type == 1  # UNIGRAM

        add_prefix = sentencepiece_model.normalizer_spec.add_dummy_prefix
        remove_whitespaces = sentencepiece_model.normalizer_spec.remove_extra_whitespaces
        precompiled_charsmap = sentencepiece_model.normalizer_spec.precompiled_charsmap

        tokenizer = SentencePieceProcessor()
        tokenizer.LoadFromFile(str(tokenizer_path))

        vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())

        tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
        scores: list[float] = [-10000.0] * vocab_size
        toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size

        for token_id in range(tokenizer.vocab_size()):
            piece = tokenizer.IdToPiece(token_id)
            text = piece.encode("utf-8")
            score = tokenizer.GetScore(token_id)

            toktype = SentencePieceTokenTypes.NORMAL
            if tokenizer.IsUnknown(token_id):
                toktype = SentencePieceTokenTypes.UNKNOWN
            elif tokenizer.IsControl(token_id):
                toktype = SentencePieceTokenTypes.CONTROL
            elif tokenizer.IsUnused(token_id):
                toktype = SentencePieceTokenTypes.UNUSED
            elif tokenizer.IsByte(token_id):
                toktype = SentencePieceTokenTypes.BYTE

            tokens[token_id] = text
            scores[token_id] = score
            toktypes[token_id] = toktype

        added_tokens_file = self.dir_model / 'added_tokens.json'
        if added_tokens_file.is_file():
            with open(added_tokens_file, "r", encoding="utf-8") as f:
                added_tokens_json = json.load(f)
                for key in added_tokens_json:
                    token_id = added_tokens_json[key]
                    if token_id >= vocab_size:
                        logger.warning(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
                        continue

                    tokens[token_id] = key.encode("utf-8")
                    scores[token_id] = -1000.0
                    toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED

        if vocab_size > len(tokens):
            pad_count = vocab_size - len(tokens)
            logger.debug(f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]")
            for i in range(1, pad_count + 1):
                tokens.append(bytes(f"[PAD{i}]", encoding="utf-8"))
                scores.append(-1000.0)
                toktypes.append(SentencePieceTokenTypes.UNUSED)

        self.gguf_writer.add_tokenizer_model("t5")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)
        self.gguf_writer.add_add_space_prefix(add_prefix)
        self.gguf_writer.add_remove_extra_whitespaces(remove_whitespaces)
        if precompiled_charsmap:
            self.gguf_writer.add_precompiled_charsmap(precompiled_charsmap)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

        self.gguf_writer.add_add_bos_token(False)
        self.gguf_writer.add_add_eos_token(True)

    def set_gguf_parameters(self):
        if (n_ctx := self.find_hparam(["n_positions"], optional=True)) is None:
            logger.warning("Couldn't find context length in config.json, assuming default value of 512")
            n_ctx = 512
        self.gguf_writer.add_context_length(n_ctx)
        self.gguf_writer.add_embedding_length(self.hparams["d_model"])
        self.gguf_writer.add_feed_forward_length(self.hparams["d_ff"])
        self.gguf_writer.add_block_count(self.hparams["num_layers"])
        self.gguf_writer.add_head_count(self.hparams["num_heads"])
        self.gguf_writer.add_key_length(self.hparams["d_kv"])
        self.gguf_writer.add_value_length(self.hparams["d_kv"])
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_relative_attn_buckets_count(self.hparams["relative_attention_num_buckets"])
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        # T5-based models store the shared token embeddings under any of the names
        # "encoder.embed_tokens.weight", "decoder.embed_tokens.weight" or "shared.weight",
        # and some checkpoints even contain several copies. We use the first of these
        # tensors as the token embeddings for both encoder and decoder and ignore the rest.
        if name in ["decoder.embed_tokens.weight", "encoder.embed_tokens.weight", "shared.weight"]:
            if not self.shared_token_embeddings_found:
                name = "shared.weight"
                self.shared_token_embeddings_found = True
            else:
                logger.debug(f"Skipping shared tensor {name!r} in safetensors so that convert can end normally.")
                return []

        return [(self.map_tensor_name(name), data_torch)]
  4555. @ModelBase.register("JAISLMHeadModel")
  4556. class JaisModel(TextModel):
  4557. model_arch = gguf.MODEL_ARCH.JAIS
  4558. def __init__(self, *args, **kwargs):
  4559. super().__init__(*args, **kwargs)
  4560. # SwigLU activation
  4561. assert self.hparams["activation_function"] == "swiglu"
  4562. # ALiBi position embedding
  4563. assert self.hparams["position_embedding_type"] == "alibi"
  4564. # Embeddings scale
  4565. self.embeddings_scale = 1.0
  4566. if 'mup_embeddings_scale' in self.hparams:
  4567. self.embeddings_scale = self.hparams['mup_embeddings_scale']
  4568. elif 'embeddings_scale' in self.hparams:
  4569. self.embeddings_scale = self.hparams['embeddings_scale']
  4570. else:
  4571. assert False
  4572. self.width_scale = 1.0
  4573. if 'mup_output_alpha' in self.hparams:
  4574. assert 'mup_width_scale' in self.hparams
  4575. self.width_scale = self.hparams['mup_output_alpha'] * self.hparams['mup_width_scale']
  4576. elif 'width_scale' in self.hparams:
  4577. self.width_scale = self.hparams['width_scale']
  4578. else:
  4579. assert False
  4580. self.max_alibi_bias = 8.0

    def set_vocab(self):
        self._set_vocab_gpt2()

    def set_gguf_parameters(self):
        self.gguf_writer.add_block_count(self.hparams["n_layer"])
        self.gguf_writer.add_context_length(self.hparams["n_positions"])
        self.gguf_writer.add_embedding_length(self.hparams["n_embd"])
        self.gguf_writer.add_feed_forward_length(self.hparams["n_inner"])
        self.gguf_writer.add_head_count(self.hparams["n_head"])
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        tensors: list[tuple[str, Tensor]] = []

        # we don't need these
        if name.endswith(".attn.bias"):
            return tensors

        if name.endswith("relative_pe.slopes"):
            # Calculate the max ALiBi bias (this is the inverse of the ALiBi slope calculation).
            # Some other models have max_alibi_bias spelled out explicitly in the hyperparams,
            # but Jais's PyTorch model simply precalculates the slope values and places them
            # in relative_pe.slopes
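            # e.g. with n_head=20, n_head_closest_log2=16; ALiBi's standard first slope is
            # 2**(-8/16) ≈ 0.7071, and -log2(0.7071) * 16 = 8.0 recovers the max bias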
            n_head_closest_log2 = 2 ** math.floor(math.log2(self.hparams["n_head"]))
            first_val = float(data_torch[0].item())
            self.max_alibi_bias = -round(math.log2(first_val) * n_head_closest_log2)

            return tensors

        if name.endswith((".c_attn.weight", ".c_proj.weight", ".c_fc.weight", ".c_fc2.weight")):
            data_torch = data_torch.transpose(1, 0)

        new_name = self.map_tensor_name(name)

        if new_name == self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD):
            tensors.append((new_name, data_torch * self.embeddings_scale))
        elif new_name == self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT):
            tensors.append((new_name, data_torch * self.width_scale))
        else:
            tensors.append((new_name, data_torch))

        return tensors

    def prepare_tensors(self):
        super().prepare_tensors()
        self.gguf_writer.add_max_alibi_bias(self.max_alibi_bias)
  4619. @ModelBase.register("Glm4ForCausalLM")
  4620. class Glm4Model(TextModel):
  4621. model_arch = gguf.MODEL_ARCH.GLM4
  4622. def set_vocab(self):
  4623. from transformers import AutoTokenizer
  4624. tokenizer = AutoTokenizer.from_pretrained(self.dir_model, trust_remote_code=True)
  4625. special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=True)
  4626. tokens, toktypes, tokpre = self.get_vocab_base()
  4627. self.gguf_writer.add_tokenizer_model("gpt2")
  4628. self.gguf_writer.add_tokenizer_pre(tokpre)
  4629. self.gguf_writer.add_token_list(tokens)
  4630. self.gguf_writer.add_token_types(toktypes)
  4631. special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=True)
  4632. special_vocab._set_special_token("eos", tokenizer.get_added_vocab()["<|endoftext|>"])
  4633. special_vocab._set_special_token("eot", tokenizer.get_added_vocab()["<|user|>"])
  4634. special_vocab._set_special_token("unk", tokenizer.get_added_vocab()["<|endoftext|>"])
  4635. special_vocab._set_special_token("bos", tokenizer.get_added_vocab()["<|endoftext|>"])
  4636. special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        rope_dim = self.hparams["head_dim"]
        self.gguf_writer.add_rope_dimension_count(int(rope_dim * self.hparams.get("partial_rotary_factor", 0.5)))
        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
            self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"])
  4646. @ModelBase.register("GlmForCausalLM", "ChatGLMModel", "ChatGLMForConditionalGeneration")
  4647. class ChatGLMModel(TextModel):
  4648. model_arch = gguf.MODEL_ARCH.CHATGLM
  4649. def set_vocab_chatglm3(self):
  4650. dir_model = self.dir_model
  4651. hparams = self.hparams
  4652. tokens: list[bytes] = []
  4653. toktypes: list[int] = []
  4654. scores: list[float] = []
  4655. from transformers import AutoTokenizer
  4656. tokenizer = AutoTokenizer.from_pretrained(dir_model, trust_remote_code=True)
  4657. vocab_size = hparams.get("padded_vocab_size", len(tokenizer.get_vocab()))
  4658. assert max(tokenizer.get_vocab().values()) < vocab_size
  4659. role_special_tokens = ["<|system|>", "<|user|>", "<|assistant|>", "<|observation|>"]
  4660. special_tokens = ["[MASK]", "[gMASK]", "[sMASK]", "sop", "eop"] + role_special_tokens
  4661. for token_id in range(vocab_size):
  4662. piece = tokenizer._convert_id_to_token(token_id)
  4663. if token_id == 0:
  4664. piece = "<unk>"
  4665. elif token_id == 1:
  4666. piece = "<bos>"
  4667. elif token_id == 2:
  4668. piece = "<eos>"
  4669. text = piece.encode("utf-8")
  4670. score = 0.0
  4671. # Referencing the tokenizer Python implementation(https://huggingface.co/THUDM/chatglm3-6b/blob/main/tokenization_chatglm.py),
  4672. # it is only valid if it is less than tokenizer.tokenizer.sp_model.vocab_size()
  4673. if len(piece) != 0 and token_id < tokenizer.tokenizer.sp_model.vocab_size():
  4674. score = tokenizer.tokenizer.sp_model.get_score(token_id)
  4675. if token_id >= tokenizer.tokenizer.sp_model.vocab_size():
  4676. if piece in special_tokens:
  4677. toktype = SentencePieceTokenTypes.CONTROL
  4678. elif len(piece) == 0:
  4679. text = f"[PAD{token_id}]".encode("utf-8")
  4680. toktype = SentencePieceTokenTypes.UNUSED
  4681. else:
  4682. toktype = SentencePieceTokenTypes.USER_DEFINED
  4683. tokens.append(text)
  4684. scores.append(score)
  4685. toktypes.append(toktype)
  4686. continue
  4687. toktype = SentencePieceTokenTypes.NORMAL
  4688. if tokenizer.tokenizer.sp_model.is_unknown(token_id):
  4689. toktype = SentencePieceTokenTypes.UNKNOWN
  4690. elif tokenizer.tokenizer.sp_model.is_control(token_id):
  4691. toktype = SentencePieceTokenTypes.CONTROL
  4692. elif tokenizer.tokenizer.sp_model.is_unused(token_id):
  4693. toktype = SentencePieceTokenTypes.UNUSED
  4694. elif tokenizer.tokenizer.sp_model.is_byte(token_id):
  4695. toktype = SentencePieceTokenTypes.BYTE
  4696. tokens.append(text)
  4697. scores.append(score)
  4698. toktypes.append(toktype)
  4699. self.gguf_writer.add_tokenizer_model("llama")
  4700. # glm3 needs prefix and suffix formatted as:
  4701. # prompt = "[gMASK]sop<|user|>\n" + prompt + "<|assistant|>"
  4702. self.gguf_writer.add_tokenizer_pre("chatglm-spm")
  4703. self.gguf_writer.add_token_list(tokens)
  4704. self.gguf_writer.add_token_scores(scores)
  4705. self.gguf_writer.add_token_types(toktypes)
  4706. special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
  4707. special_vocab.add_to_gguf(self.gguf_writer)
    @staticmethod
    def token_bytes_to_string(b):
        from transformers.models.gpt2.tokenization_gpt2 import bytes_to_unicode
        byte_encoder = bytes_to_unicode()
        return ''.join([byte_encoder[ord(char)] for char in b.decode('latin-1')])

    @staticmethod
    def bpe(mergeable_ranks: dict[bytes, int], token: bytes, max_rank: int | None = None) -> list[bytes]:
        parts = [bytes([b]) for b in token]
        while True:
            min_idx = None
            min_rank = None
            for i, pair in enumerate(zip(parts[:-1], parts[1:])):
                rank = mergeable_ranks.get(pair[0] + pair[1])
                if rank is not None and (min_rank is None or rank < min_rank):
                    min_idx = i
                    min_rank = rank
            if min_rank is None or (max_rank is not None and min_rank >= max_rank):
                break
            assert min_idx is not None
            parts = parts[:min_idx] + [parts[min_idx] + parts[min_idx + 1]] + parts[min_idx + 2:]
        return parts
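
    # A minimal sketch of the merge loop above, using hypothetical ranks (not taken
    # from any real tokenizer): the lowest-ranked adjacent pair is merged first, and
    # max_rank replays only the merges whose rank is strictly below it.
    #
    #   ranks = {b"ab": 0, b"abc": 1}
    #   ChatGLMModel.bpe(ranks, b"abcd")              # -> [b"abc", b"d"]
    #   ChatGLMModel.bpe(ranks, b"abcd", max_rank=1)  # -> [b"ab", b"c", b"d"]
    #   ChatGLMModel.bpe(ranks, b"abcd", max_rank=0)  # -> [b"a", b"b", b"c", b"d"]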

    def set_vocab(self):
        if "THUDM/chatglm3-6b" in self.hparams.get("_name_or_path", ""):
            self.set_vocab_chatglm3()
            return

        dir_model = self.dir_model
        hparams = self.hparams
        tokens: list[str] = []
        toktypes: list[int] = []

        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(dir_model, trust_remote_code=True)
        vocab_size = hparams.get("padded_vocab_size", hparams["vocab_size"])
        assert max(tokenizer.get_vocab().values()) < vocab_size

        tokens, toktypes, tokpre = self.get_vocab_base()
        self.gguf_writer.add_tokenizer_model("gpt2")
        self.gguf_writer.add_tokenizer_pre(tokpre)
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)
        special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=True)
        # only add special tokens when they were not already loaded from config.json
        special_vocab._set_special_token("eos", tokenizer.get_added_vocab()["<|endoftext|>"])
        special_vocab._set_special_token("eot", tokenizer.get_added_vocab()["<|user|>"])
        # this one is usually not in config.json anyway
        special_vocab._set_special_token("unk", tokenizer.get_added_vocab()["<|endoftext|>"])
        special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed"))
        n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads"))
        n_head_kv = self.hparams.get("multi_query_group_num", self.hparams.get("num_key_value_heads", n_head))
        self.gguf_writer.add_context_length(self.hparams.get("seq_length", n_embed))
        self.gguf_writer.add_embedding_length(n_embed)
        self.gguf_writer.add_feed_forward_length(self.hparams.get("ffn_hidden_size", self.hparams.get("intermediate_size", 4 * n_embed)))
        self.gguf_writer.add_block_count(self.hparams.get("num_layers", self.hparams["num_hidden_layers"]))
        self.gguf_writer.add_head_count(n_head)
        self.gguf_writer.add_head_count_kv(n_head_kv)
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams.get("layernorm_epsilon", 1e-5))
        self.gguf_writer.add_file_type(self.ftype)
        if "attention_dim" in self.hparams:
            rope_dim = self.hparams["attention_dim"]
        else:
            rope_dim = self.hparams["hidden_size"] // self.hparams["num_attention_heads"]
        self.gguf_writer.add_rope_dimension_count(int(rope_dim * self.hparams.get("partial_rotary_factor", 0.5)))
        self.gguf_writer.add_add_bos_token(False)
        rope_freq = 10000
        if "rope_ratio" in self.hparams:
            rope_freq = rope_freq * self.hparams["rope_ratio"]
        self.gguf_writer.add_rope_freq_base(rope_freq)
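
    # Worked example of the RoPE parameters above (hypothetical config values, for
    # illustration only): with hidden_size=4096, num_attention_heads=32 and the
    # default partial_rotary_factor of 0.5, rope_dim = 4096 // 32 = 128 and the
    # stored rope dimension count is int(128 * 0.5) = 64; a rope_ratio of 50 would
    # raise the frequency base from 10000 to 500000.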

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        if name.endswith(".rotary_pos_emb.inv_freq") or name.startswith("model.vision."):
            return []

        name = name.removeprefix("transformer.")
        return [(self.map_tensor_name(name), data_torch)]


@ModelBase.register("NemotronForCausalLM")
class NemotronModel(TextModel):
    model_arch = gguf.MODEL_ARCH.NEMOTRON

    def set_vocab(self):
        self._set_vocab_sentencepiece()
        self.gguf_writer.add_pad_token_id(0)
        self.gguf_writer.add_unk_token_id(1)

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        hparams = self.hparams
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])

        f_norm_eps = self.find_hparam(["layer_norm_eps", "layer_norm_epsilon", "norm_epsilon", "norm_eps"])
        self.gguf_writer.add_layer_norm_eps(f_norm_eps)

        # * Partial RoPE
        rot_pct = self.find_hparam(["partial_rotary_factor", "rope_pct", "rope_percent"])
        n_embd = self.find_hparam(["hidden_size", "n_embd"])
        n_head = self.find_hparam(["num_attention_heads", "n_head"])
        self.gguf_writer.add_rope_dimension_count(int(rot_pct * n_embd) // n_head)

        # * RopeScaling for Nemotron
        if "rope_scaling" not in self.hparams or self.hparams["rope_scaling"] is None:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)
        else:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
            # the scale factor lives inside the rope_scaling dict, not at the top level
            self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # * Adding +1 to LayerNorm's weights here to implement layernorm1p w/o changing anything on the GGML engine side
        # model.layers.{l}.input_layernorm.weight
        # model.layers.{l}.post_attention_layernorm.weight
        # model.norm.weight
        if name.endswith("norm.weight"):
            data_torch = data_torch + 1
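            # Rationale (a sketch, not from the original source): layernorm1p computes
            # y = LN(x) * (1 + w) + b rather than y = LN(x) * w + b, so baking (w + 1)
            # into the stored weight lets a standard LayerNorm kernel reproduce it
            # without any engine-side change.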
        return [(self.map_tensor_name(name), data_torch)]


@ModelBase.register("ExaoneForCausalLM")
class ExaoneModel(TextModel):
    model_arch = gguf.MODEL_ARCH.EXAONE

    def set_gguf_parameters(self):
        hparams = self.hparams

        assert (hparams["activation_function"] == "silu")

        max_position_embeddings = hparams["max_position_embeddings"]
        embed_dim = hparams["hidden_size"]
        num_heads = hparams["num_attention_heads"]
        num_kv_heads = hparams.get("num_key_value_heads", num_heads)
        layer_norm_eps = hparams["layer_norm_epsilon"]
        intermediate_size = hparams["intermediate_size"] if "intermediate_size" in hparams else 4 * embed_dim
        num_layers = hparams["num_layers"]
        # ignore for now as EXAONE-3.0-7.8B-Instruct attention_dropout is 0.0
        # attention_dropout_rate = hparams["attention_dropout"]
        # ignore for now as EXAONE-3.0-7.8B-Instruct embed_dropout is 0.0
        # embed_dropout_rate = hparams["embed_dropout"]
        self.gguf_writer.add_embedding_length(embed_dim)
        self.gguf_writer.add_head_count(num_heads)
        self.gguf_writer.add_head_count_kv(num_kv_heads)
        self.gguf_writer.add_context_length(max_position_embeddings)
        self.gguf_writer.add_layer_norm_rms_eps(layer_norm_eps)
        self.gguf_writer.add_feed_forward_length(intermediate_size)
        self.gguf_writer.add_block_count(num_layers)
        self.gguf_writer.add_file_type(self.ftype)

        if (rope_theta := self.hparams.get("rope_theta")) is not None:
            self.gguf_writer.add_rope_freq_base(rope_theta)
        rotary_factor = self.find_hparam(["partial_rotary_factor", "rope_pct"], optional=True)
        rotary_factor = rotary_factor if rotary_factor is not None else 1.0
        self.gguf_writer.add_rope_dimension_count(int(rotary_factor * (hparams["hidden_size"] // hparams["num_attention_heads"])))
        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "linear" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])

    def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
        if rope_scaling := self.find_hparam(["rope_scaling"], optional=True):
            if rope_scaling.get("rope_type", '').lower() == "llama3":
                base = self.hparams.get("rope_theta", 10000.0)
                dim = self.hparams.get("head_dim", self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
                freqs = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim))

                factor = rope_scaling.get("factor", 8.0)
                low_freq_factor = rope_scaling.get("low_freq_factor", 1.0)
                high_freq_factor = rope_scaling.get("high_freq_factor", 4.0)
                old_context_len = self.hparams.get("original_max_position_embeddings", 8192)

                low_freq_wavelen = old_context_len / low_freq_factor
                high_freq_wavelen = old_context_len / high_freq_factor
                assert low_freq_wavelen != high_freq_wavelen

                rope_factors = []
                for freq in freqs:
                    wavelen = 2 * math.pi / freq
                    if wavelen < high_freq_wavelen:
                        rope_factors.append(1)
                    elif wavelen > low_freq_wavelen:
                        rope_factors.append(factor)
                    else:
                        smooth = (old_context_len / wavelen - low_freq_factor) / (high_freq_factor - low_freq_factor)
                        rope_factors.append(1 / ((1 - smooth) / factor + smooth))

                yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FREQS), torch.tensor(rope_factors, dtype=torch.float32))
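
    # Worked numbers for the banding above, using the defaults in this method
    # (factor=8, low_freq_factor=1, high_freq_factor=4, old_context_len=8192):
    # low_freq_wavelen = 8192 and high_freq_wavelen = 2048, so a frequency with
    # wavelength 1000 keeps factor 1, one with wavelength 10000 gets the full
    # factor 8, and wavelengths in between are smoothly interpolated.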


@ModelBase.register("GraniteForCausalLM")
class GraniteModel(LlamaModel):
    """Conversion for IBM's GraniteForCausalLM"""
    model_arch = gguf.MODEL_ARCH.GRANITE

    def set_gguf_parameters(self):
        """Granite uses standard llama parameters with the following differences:
        - No head_dim support
        - New multiplier params:
            - attention_scale
            - embedding_scale
            - residual_scale
            - logits_scaling
        """
        if head_dim := self.hparams.pop("head_dim", None):
            logger.warning("Ignoring head_dim (%s) from config for Granite", head_dim)
        super().set_gguf_parameters()
        # NOTE: Convert _multiplier params to _scale params for naming consistency
        if attention_scale := self.hparams.get("attention_multiplier"):
            self.gguf_writer.add_attention_scale(attention_scale)
            logger.info("gguf: (granite) attention_scale = %s", attention_scale)
        if embedding_scale := self.hparams.get("embedding_multiplier"):
            self.gguf_writer.add_embedding_scale(embedding_scale)
            logger.info("gguf: (granite) embedding_scale = %s", embedding_scale)
        if residual_scale := self.hparams.get("residual_multiplier"):
            self.gguf_writer.add_residual_scale(residual_scale)
            logger.info("gguf: (granite) residual_scale = %s", residual_scale)
        if logits_scale := self.hparams.get("logits_scaling"):
            self.gguf_writer.add_logit_scale(logits_scale)
            logger.info("gguf: (granite) logits_scale = %s", logits_scale)


@ModelBase.register("GraniteMoeForCausalLM", "GraniteMoeSharedForCausalLM")
class GraniteMoeModel(GraniteModel):
    """Conversion for IBM's GraniteMoeForCausalLM"""
    model_arch = gguf.MODEL_ARCH.GRANITE_MOE

    def set_gguf_parameters(self):
        """GraniteMoeShared uses GraniteMoe parameters plus the following:
        - shared_intermediate_size
        """
        super().set_gguf_parameters()
        if shared_feed_forward_length := self.hparams.get("shared_intermediate_size"):
            self.gguf_writer.add_expert_shared_feed_forward_length(shared_feed_forward_length)
            logger.info("gguf: (granitemoeshared) shared_feed_forward_length = %s", shared_feed_forward_length)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        """In modeling_granitemoe, the JetMoe implementation of parallel experts
        is used. This essentially merges w1 and w3 into a single tensor with 2x
        the hidden size that is then split during forward. To keep compatibility
        with existing mixtral support, we pull them apart here.
        """
        if name.endswith("block_sparse_moe.input_linear.weight"):
            ffn_dim = self.hparams["intermediate_size"]
            assert data_torch.shape[-2] == 2 * ffn_dim, "Merged FFN tensor size must be 2 * intermediate_size"
            gate, up = data_torch.split(ffn_dim, dim=-2)
            return [
                (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE_EXP, bid), gate),
                (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP_EXP, bid), up),
            ]
        if name.endswith("shared_mlp.input_linear.weight"):
            ffn_dim = self.hparams["shared_intermediate_size"]
            assert data_torch.shape[-2] == 2 * ffn_dim, "Merged FFN tensor size must be 2 * shared_intermediate_size"
            gate, up = data_torch.split(ffn_dim, dim=-2)
            return [
                (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE_SHEXP, bid), gate),
                (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP_SHEXP, bid), up),
            ]

        return super().modify_tensors(data_torch, name, bid)


@ModelBase.register("BailingMoeForCausalLM")
class BailingMoeModel(TextModel):
    model_arch = gguf.MODEL_ARCH.BAILINGMOE

    def set_vocab(self):
        self._set_vocab_gpt2()

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        hparams = self.hparams
        rope_dim = hparams.get("head_dim") or hparams["hidden_size"] // hparams["num_attention_heads"]

        self.gguf_writer.add_rope_dimension_count(rope_dim)
        rope_scaling = self.hparams.get("rope_scaling") or {}
        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
            self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"])
        else:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)
        self.gguf_writer.add_leading_dense_block_count(hparams["first_k_dense_replace"])
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])
        self.gguf_writer.add_expert_feed_forward_length(hparams["moe_intermediate_size"])
        self.gguf_writer.add_expert_weights_scale(1.0)
        self.gguf_writer.add_expert_count(hparams["num_experts"])
        self.gguf_writer.add_expert_shared_count(hparams["num_shared_experts"])
        self.gguf_writer.add_expert_weights_norm(hparams["norm_topk_prob"])

    _experts: list[dict[str, Tensor]] | None = None

    @staticmethod
    def permute(weights: Tensor, n_head: int, n_head_kv: int | None):
        if n_head_kv is not None and n_head != n_head_kv:
            n_head = n_head_kv
        return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
                .swapaxes(1, 2)
                .reshape(weights.shape))

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        n_head = self.hparams["num_attention_heads"]
        n_kv_head = self.hparams.get("num_key_value_heads")
        n_embd = self.hparams["hidden_size"]
        head_dim = self.hparams.get("head_dim") or n_embd // n_head

        output_name = self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT)

        if name.endswith("attention.dense.weight"):
            return [(self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_OUT, bid), data_torch)]
        elif name.endswith("query_key_value.weight"):
            q, k, v = data_torch.split([n_head * head_dim, n_kv_head * head_dim, n_kv_head * head_dim], dim=-2)

            return [
                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid), BailingMoeModel.permute(q, n_head, n_head)),
                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid), BailingMoeModel.permute(k, n_head, n_kv_head)),
                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid), v)
            ]
        elif name.find("mlp.experts") != -1:
            n_experts = self.hparams["num_experts"]
            assert bid is not None

            tensors: list[tuple[str, Tensor]] = []

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                # merge the experts into a single 3d tensor
                for w_name in ["down_proj", "gate_proj", "up_proj"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)

                    merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"

                    new_name = self.map_tensor_name(merged_name)

                    tensors.append((new_name, data_torch))

            return tensors
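            # The stack above yields, e.g., one (num_experts, moe_intermediate_size,
            # hidden_size) tensor per projection (sizes depend on the checkpoint), so
            # each layer stores three merged expert tensors instead of 3 * n_experts
            # separate ones.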

        new_name = self.map_tensor_name(name)

        if new_name == output_name and self.hparams.get("norm_head"):
            data_torch = data_torch.float()
            data_torch /= torch.norm(data_torch, p=2, dim=0, keepdim=True) + 1e-7

        return [(new_name, data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")


@ModelBase.register("ChameleonForConditionalGeneration")
@ModelBase.register("ChameleonForCausalLM")  # obsolete
class ChameleonModel(TextModel):
    model_arch = gguf.MODEL_ARCH.CHAMELEON

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_swin_norm(self.hparams.get("swin_norm", False))

    def set_vocab(self):
        self._set_vocab_gpt2()

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # ignore image tokenizer for now
        # TODO: remove this once image support is implemented for Chameleon
        if name.startswith("model.vqmodel"):
            return []

        n_head = self.hparams["num_attention_heads"]
        n_kv_head = self.hparams.get("num_key_value_heads")
        hidden_dim = self.hparams.get("hidden_size")

        if name.endswith(("q_proj.weight", "q_proj.bias")):
            data_torch = LlamaModel.permute(data_torch, n_head, n_head)
        if name.endswith(("k_proj.weight", "k_proj.bias")):
            data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)
        if name.endswith(("q_norm.weight", "q_norm.bias")):
            data_torch = ChameleonModel._reverse_hf_permute(data_torch, n_head, hidden_dim)
        if name.endswith(("k_norm.weight", "k_norm.bias")):
            data_torch = ChameleonModel._reverse_hf_permute(data_torch, n_kv_head, hidden_dim)

        return [(self.map_tensor_name(name), data_torch)]

    # see: https://github.com/huggingface/transformers/blob/72fb02c47dbbe1999ae105319f24631cad6e2e00/src/transformers/models/chameleon/convert_chameleon_weights_to_hf.py#L176-L203
    @staticmethod
    def _reverse_hf_permute(data_torch, n_heads, hidden_dim):
        head_dim = hidden_dim // n_heads
        data_torch = data_torch[0].view(2, head_dim // 2).t().reshape(1, -1)
        data_torch = data_torch.repeat_interleave(n_heads, 0)
        return data_torch
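
    # Reading of the permute above (an editor's gloss, not from the original source):
    # the first head's norm weights, laid out as two contiguous halves
    # [a0..a_{d/2-1}, b0..b_{d/2-1}], are re-interleaved to [a0, b0, a1, b1, ...]
    # and then repeated for every head, undoing the split-half RoPE layout applied
    # by the linked HF conversion script.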


@ModelBase.register("UltravoxModel")
class UltravoxModel(TextModel):
    model_arch = gguf.MODEL_ARCH.LLAMA  # dummy

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        raise NotImplementedError("Ultravox does not have a text decoder; it uses Llama or other models for text. To export the audio encoder, use the --mmproj argument")


@ModelBase.register("Qwen2AudioForConditionalGeneration")
class WhisperEncoderModel(MmprojModel):
    has_vision_encoder = False  # no vision encoder
    has_audio_encoder = True

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.hparams["hidden_size"] = self.hparams["d_model"]
        self.hparams["intermediate_size"] = self.hparams["encoder_ffn_dim"]
        self.hparams["num_attention_heads"] = self.hparams["encoder_attention_heads"]

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.QWEN2A)
        self.gguf_writer.add_audio_num_mel_bins(self.hparams["num_mel_bins"])
        self.gguf_writer.add_audio_attention_layernorm_eps(self.hparams.get("layer_norm_eps", 1e-5))

    def tensor_force_quant(self, name, new_name, bid, n_dims):
        del bid, new_name, n_dims  # unused
        if ".conv" in name and ".weight" in name:
            return gguf.GGMLQuantizationType.F16
        return False

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        if name.startswith("language_model."):
            # skip language model tensors
            return []

        # prevent name clashes with vision tensors
        if name.startswith("multi_modal_projector"):
            name = "audio." + name

        if "conv1.bias" in name or "conv2.bias" in name:
            # transpose conv1 and conv2 bias
            data_torch = data_torch.unsqueeze(-1)

        return [(self.map_tensor_name(name), data_torch)]


@ModelBase.register("UltravoxModel")
class UltravoxWhisperEncoderModel(WhisperEncoderModel):
    has_vision_encoder = False  # no vision encoder
    has_audio_encoder = True

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_audio_stack_factor(self.global_config["stack_factor"])


###### CONVERSION LOGIC ######


# tree of lazy tensors
class LazyTorchTensor(gguf.LazyBase):
    _tensor_type = torch.Tensor

    # to keep the type-checker happy
    dtype: torch.dtype
    shape: torch.Size

    # only used when converting a torch.Tensor to a np.ndarray
    _dtype_map: dict[torch.dtype, type] = {
        torch.float16: np.float16,
        torch.float32: np.float32,
    }

    # used for safetensors slices
    # ref: https://github.com/huggingface/safetensors/blob/079781fd0dc455ba0fe851e2b4507c33d0c0d407/bindings/python/src/lib.rs#L1046
    # TODO: uncomment U64, U32, and U16, ref: https://github.com/pytorch/pytorch/issues/58734
    _dtype_str_map: dict[str, torch.dtype] = {
        "F64": torch.float64,
        "F32": torch.float32,
        "BF16": torch.bfloat16,
        "F16": torch.float16,
        # "U64": torch.uint64,
        "I64": torch.int64,
        # "U32": torch.uint32,
        "I32": torch.int32,
        # "U16": torch.uint16,
        "I16": torch.int16,
        "U8": torch.uint8,
        "I8": torch.int8,
        "BOOL": torch.bool,
        "F8_E4M3": torch.float8_e4m3fn,
        "F8_E5M2": torch.float8_e5m2,
    }

    def numpy(self) -> gguf.LazyNumpyTensor:
        dtype = self._dtype_map[self.dtype]
        return gguf.LazyNumpyTensor(
            meta=gguf.LazyNumpyTensor.meta_with_dtype_and_shape(dtype, self.shape),
            args=(self,),
            func=(lambda s: s.numpy())
        )

    @classmethod
    def meta_with_dtype_and_shape(cls, dtype: torch.dtype, shape: tuple[int, ...]) -> Tensor:
        return torch.empty(size=shape, dtype=dtype, device="meta")

    @classmethod
    def from_safetensors_slice(cls, st_slice: Any) -> Tensor:
        dtype = cls._dtype_str_map[st_slice.get_dtype()]
        shape: tuple[int, ...] = tuple(st_slice.get_shape())
        lazy = cls(meta=cls.meta_with_dtype_and_shape(dtype, shape), args=(st_slice,), func=lambda s: s[:])
        return cast(torch.Tensor, lazy)

    @classmethod
    def from_remote_tensor(cls, remote_tensor: gguf.utility.RemoteTensor):
        dtype = cls._dtype_str_map[remote_tensor.dtype]
        shape = remote_tensor.shape
        meta = cls.meta_with_dtype_and_shape(dtype, shape)
        lazy = cls(meta=meta, args=(remote_tensor,), func=lambda r: torch.frombuffer(r.data(), dtype=dtype).reshape(shape))
        return cast(torch.Tensor, lazy)

    @classmethod
    def __torch_function__(cls, func, types, args=(), kwargs=None):
        del types  # unused

        if kwargs is None:
            kwargs = {}

        if func is torch.Tensor.numpy:
            return args[0].numpy()

        return cls._wrap_fn(func)(*args, **kwargs)
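

# A rough sketch of how the lazy wrapper above defers work (usage hypothetical):
#   t = LazyTorchTensor.from_safetensors_slice(st_slice)  # nothing read from disk yet
#   t = t.float() + 1    # routed through __torch_function__, recorded via _wrap_fn
#   arr = t.numpy()      # becomes a LazyNumpyTensor; gguf's writer materializes the
#                        # whole chain only when the tensor data is actually written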
def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(
        description="Convert a huggingface model to a GGML-compatible file")
    parser.add_argument(
        "--vocab-only", action="store_true",
        help="extract only the vocab",
    )
    parser.add_argument(
        "--outfile", type=Path,
        help="path to write to; default: based on input. {ftype} will be replaced by the outtype.",
    )
    parser.add_argument(
        "--outtype", type=str, choices=["f32", "f16", "bf16", "q8_0", "tq1_0", "tq2_0", "auto"], default="f16",
        help="output format - use f32 for float32, f16 for float16, bf16 for bfloat16, q8_0 for Q8_0, tq1_0 or tq2_0 for ternary, and auto for the highest-fidelity 16-bit float type depending on the first loaded tensor type",
    )
    parser.add_argument(
        "--bigendian", action="store_true",
        help="model is executed on a big endian machine",
    )
    parser.add_argument(
        "model", type=Path,
        help="directory containing the model file",
        nargs="?",
    )
    parser.add_argument(
        "--use-temp-file", action="store_true",
        help="use the tempfile library while processing (helpful when running out of memory, process killed)",
    )
    parser.add_argument(
        "--no-lazy", action="store_true",
        help="use more RAM by computing all outputs before writing (use in case lazy evaluation is broken)",
    )
    parser.add_argument(
        "--model-name", type=str, default=None,
        help="name of the model",
    )
    parser.add_argument(
        "--verbose", action="store_true",
        help="increase output verbosity",
    )
    parser.add_argument(
        "--split-max-tensors", type=int, default=0,
        help="max tensors in each split",
    )
    parser.add_argument(
        "--split-max-size", type=str, default="0",
        help="max size per split N(M|G)",
    )
    parser.add_argument(
        "--dry-run", action="store_true",
        help="only print out a split plan and exit, without writing any new files",
    )
    parser.add_argument(
        "--no-tensor-first-split", action="store_true",
        help="do not add tensors to the first split (disabled by default)"
    )
    parser.add_argument(
        "--metadata", type=Path,
        help="Specify the path for an authorship metadata override file"
    )
    parser.add_argument(
        "--print-supported-models", action="store_true",
        help="Print the supported models"
    )
    parser.add_argument(
        "--remote", action="store_true",
        help="(Experimental) Read safetensors files remotely without downloading them to disk. Config and tokenizer files will still be downloaded. To use this feature, specify a Hugging Face model repo name instead of a local directory, for example: 'HuggingFaceTB/SmolLM2-1.7B-Instruct'. Note: To access a gated repo, set the HF_TOKEN environment variable to your Hugging Face token.",
    )
    parser.add_argument(
        "--mmproj", action="store_true",
        help="(Experimental) Export the multimodal projector (mmproj) for vision models. This will only work on some vision models. The prefix 'mmproj-' will be added to the output file name.",
    )

    args = parser.parse_args()
    if not args.print_supported_models and args.model is None:
        parser.error("the following arguments are required: model")
    return args


def split_str_to_n_bytes(split_str: str) -> int:
    if split_str.endswith("K"):
        n = int(split_str[:-1]) * 1000
    elif split_str.endswith("M"):
        n = int(split_str[:-1]) * 1000 * 1000
    elif split_str.endswith("G"):
        n = int(split_str[:-1]) * 1000 * 1000 * 1000
    elif split_str.isnumeric():
        n = int(split_str)
    else:
        raise ValueError(f"Invalid split size: {split_str}, must be a number, optionally followed by K, M, or G")

    if n < 0:
        raise ValueError(f"Invalid split size: {split_str}, must be positive")

    return n
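

# Note that the multipliers above are decimal (SI), not binary, e.g.:
#   split_str_to_n_bytes("300M") == 300_000_000
#   split_str_to_n_bytes("2G")   == 2_000_000_000  (not 2 * 1024**3)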


def get_model_architecture(hparams: dict[str, Any], model_type: ModelType) -> str:
    # TODO @ngxson : this won't work correctly if the model has both audio & vision encoders
    # maybe we should fallback to text model's arch in that case, since not many models have both
    text_config = hparams.get("text_config", {})
    vision_config = hparams.get("vision_config", {})
    arch = hparams["architectures"][0]

    # if "architectures" is found in the sub-config, use that instead
    if model_type == ModelType.TEXT and text_config.get("architectures") is not None:
        arch = text_config["architectures"][0]
    elif model_type == ModelType.MMPROJ and vision_config.get("architectures") is not None:
        arch = vision_config["architectures"][0]
    return arch
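

# For example (a hypothetical multimodal config, for illustration only): with
#   hparams = {"architectures": ["LlavaForConditionalGeneration"],
#              "text_config": {"architectures": ["LlamaForCausalLM"]}}
# get_model_architecture(hparams, ModelType.TEXT) returns "LlamaForCausalLM",
# while ModelType.MMPROJ falls back to the top-level "LlavaForConditionalGeneration".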


def main() -> None:
    args = parse_args()

    if args.print_supported_models:
        logger.error("Supported models:")
        ModelBase.print_registered_models()
        sys.exit(0)

    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)

    dir_model = args.model

    if args.remote:
        from huggingface_hub import snapshot_download
        local_dir = snapshot_download(
            repo_id=str(dir_model),
            allow_patterns=["LICENSE", "*.json", "*.md", "*.txt", "tokenizer.model"])
        dir_model = Path(local_dir)
        logger.info(f"Downloaded config and tokenizer to {local_dir}")

    if not dir_model.is_dir():
        logger.error(f"Error: {args.model} is not a directory")
        sys.exit(1)

    ftype_map: dict[str, gguf.LlamaFileType] = {
        "f32": gguf.LlamaFileType.ALL_F32,
        "f16": gguf.LlamaFileType.MOSTLY_F16,
        "bf16": gguf.LlamaFileType.MOSTLY_BF16,
        "q8_0": gguf.LlamaFileType.MOSTLY_Q8_0,
        "tq1_0": gguf.LlamaFileType.MOSTLY_TQ1_0,
        "tq2_0": gguf.LlamaFileType.MOSTLY_TQ2_0,
        "auto": gguf.LlamaFileType.GUESSED,
    }

    is_split = args.split_max_tensors > 0 or args.split_max_size != "0"
    if args.use_temp_file and is_split:
        logger.error("Error: Cannot use temp file when splitting")
        sys.exit(1)

    if args.outfile is not None:
        fname_out = args.outfile
    elif args.remote:
        # if remote, use the model ID as the output file name
        fname_out = Path("./" + str(args.model).replace("/", "-") + "-{ftype}.gguf")
    else:
        fname_out = dir_model

    logger.info(f"Loading model: {dir_model.name}")

    if args.mmproj:
        if "mmproj" not in fname_out.name:
            fname_out = ModelBase.add_prefix_to_filename(fname_out, "mmproj-")

    with torch.inference_mode():
        output_type = ftype_map[args.outtype]
        model_type = ModelType.MMPROJ if args.mmproj else ModelType.TEXT
        hparams = ModelBase.load_hparams(dir_model)
        model_architecture = get_model_architecture(hparams, model_type)
        logger.info(f"Model architecture: {model_architecture}")
        try:
            model_class = ModelBase.from_model_architecture(model_architecture, model_type=model_type)
        except NotImplementedError:
            logger.error(f"Model {model_architecture} is not supported")
            sys.exit(1)

        model_instance = model_class(dir_model, output_type, fname_out,
                                     is_big_endian=args.bigendian, use_temp_file=args.use_temp_file,
                                     eager=args.no_lazy,
                                     metadata_override=args.metadata, model_name=args.model_name,
                                     split_max_tensors=args.split_max_tensors,
                                     split_max_size=split_str_to_n_bytes(args.split_max_size), dry_run=args.dry_run,
                                     small_first_shard=args.no_tensor_first_split,
                                     remote_hf_model_id=str(args.model) if args.remote else None)

        if args.vocab_only:
            logger.info("Exporting model vocab...")
            model_instance.write_vocab()
            logger.info(f"Model vocab successfully exported to {model_instance.fname_out}")
        else:
            logger.info("Exporting model...")
            model_instance.write()
            out_path = f"{model_instance.fname_out.parent}{os.sep}" if is_split else model_instance.fname_out
            logger.info(f"Model successfully exported to {out_path}")


if __name__ == '__main__':
    main()