convert_hf_to_gguf.py

#!/usr/bin/env python3
# -*- coding: utf-8 -*-

from __future__ import annotations

import ast
import logging
import argparse
import contextlib
import json
import os
import re
import sys
from enum import IntEnum
from pathlib import Path
from hashlib import sha256
from typing import TYPE_CHECKING, Any, Callable, ContextManager, Iterable, Iterator, Literal, Sequence, TypeVar, cast
from itertools import chain

import math
import numpy as np
import torch

if TYPE_CHECKING:
    from torch import Tensor

if 'NO_LOCAL_GGUF' not in os.environ:
    sys.path.insert(1, str(Path(__file__).parent / 'gguf-py'))
import gguf

logger = logging.getLogger("hf-to-gguf")


###### MODEL DEFINITIONS ######
class SentencePieceTokenTypes(IntEnum):
    NORMAL = 1
    UNKNOWN = 2
    CONTROL = 3
    USER_DEFINED = 4
    UNUSED = 5
    BYTE = 6
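
# These values match SentencePiece's own piece-type enum (sentencepiece_model.proto),
# which is why they start at 1 rather than 0.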

AnyModel = TypeVar("AnyModel", bound="type[Model]")


class Model:
    _model_classes: dict[str, type[Model]] = {}

    dir_model: Path
    ftype: gguf.LlamaFileType
    fname_out: Path
    is_big_endian: bool
    endianess: gguf.GGUFEndian
    use_temp_file: bool
    lazy: bool
    part_names: list[str]
    is_safetensors: bool
    hparams: dict[str, Any]
    block_count: int
    tensor_map: gguf.TensorNameMap
    tensor_names: set[str] | None
    gguf_writer: gguf.GGUFWriter
    model_name: str | None
    metadata_override: Path | None
    dir_model_card: Path

    # subclasses should define this!
    model_arch: gguf.MODEL_ARCH

    def __init__(self, dir_model: Path, ftype: gguf.LlamaFileType, fname_out: Path, is_big_endian: bool = False,
                 use_temp_file: bool = False, eager: bool = False,
                 metadata_override: Path | None = None, model_name: str | None = None,
                 split_max_tensors: int = 0, split_max_size: int = 0, dry_run: bool = False,
                 small_first_shard: bool = False, hparams: dict[str, Any] | None = None):
        if type(self) is Model:
            raise TypeError(f"{type(self).__name__!r} should not be directly instantiated")

        self.dir_model = dir_model
        self.ftype = ftype
        self.fname_out = fname_out
        self.is_big_endian = is_big_endian
        self.endianess = gguf.GGUFEndian.BIG if is_big_endian else gguf.GGUFEndian.LITTLE
        self.use_temp_file = use_temp_file
        self.lazy = not eager
        self.part_names = Model.get_model_part_names(self.dir_model, "model", ".safetensors")
        self.is_safetensors = len(self.part_names) > 0
        if not self.is_safetensors:
            self.part_names = Model.get_model_part_names(self.dir_model, "pytorch_model", ".bin")
        self.hparams = Model.load_hparams(self.dir_model) if hparams is None else hparams
        self.block_count = self.find_hparam(["n_layers", "num_hidden_layers", "n_layer", "num_layers"])
        self.tensor_map = gguf.get_tensor_name_map(self.model_arch, self.block_count)
        self.tensor_names = None
        self.metadata_override = metadata_override
        self.model_name = model_name
        self.dir_model_card = dir_model  # overridden in convert_lora_to_gguf.py

        # Apply heuristics to figure out typical tensor encoding based on first layer tensor encoding type
        if self.ftype == gguf.LlamaFileType.GUESSED:
            # NOTE: can't use field "torch_dtype" in config.json, because some finetunes lie.
            _, first_tensor = next(self.get_tensors())
            if first_tensor.dtype == torch.float16:
                logger.info(f"choosing --outtype f16 from first tensor type ({first_tensor.dtype})")
                self.ftype = gguf.LlamaFileType.MOSTLY_F16
            else:
                logger.info(f"choosing --outtype bf16 from first tensor type ({first_tensor.dtype})")
                self.ftype = gguf.LlamaFileType.MOSTLY_BF16

        # Configure GGUF Writer
        self.gguf_writer = gguf.GGUFWriter(path=None, arch=gguf.MODEL_ARCH_NAMES[self.model_arch], endianess=self.endianess, use_temp_file=self.use_temp_file,
                                           split_max_tensors=split_max_tensors, split_max_size=split_max_size, dry_run=dry_run, small_first_shard=small_first_shard)
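
    # Typical driver flow (an illustrative sketch, not the CLI entry point; the
    # paths below are hypothetical):
    #   model_class = Model.from_model_architecture("GPTNeoXForCausalLM")
    #   model = model_class(Path("./my-model"), gguf.LlamaFileType.MOSTLY_F16, Path("./out"))
    #   model.write()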

    @classmethod
    def __init_subclass__(cls):
        # can't use an abstract property, because overriding it without type errors
        # would require using decorated functions instead of simply defining the property
        if "model_arch" not in cls.__dict__:
            raise TypeError(f"Missing property 'model_arch' for {cls.__name__!r}")

    def find_hparam(self, keys: Iterable[str], optional: bool = False) -> Any:
        key = next((k for k in keys if k in self.hparams), None)
        if key is not None:
            return self.hparams[key]
        if optional:
            return None
        raise KeyError(f"could not find any of: {keys}")
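
    # e.g. find_hparam(["n_layers", "num_hidden_layers"]) returns the value of the first
    # key present in config.json; with optional=True a miss yields None instead of KeyError.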

    def set_vocab(self):
        self._set_vocab_gpt2()

    def get_tensors(self) -> Iterator[tuple[str, Tensor]]:
        tensor_names_from_parts: set[str] = set()

        index_name = "model.safetensors" if self.is_safetensors else "pytorch_model.bin"
        index_name += ".index.json"
        index_file = self.dir_model / index_name

        if index_file.is_file():
            self.tensor_names = set()
            logger.info(f"gguf: loading model weight map from '{index_name}'")
            with open(index_file, "r", encoding="utf-8") as f:
                index: dict[str, Any] = json.load(f)
                weight_map = index.get("weight_map")
                if weight_map is None or not isinstance(weight_map, dict):
                    raise ValueError(f"Can't load 'weight_map' from {index_name!r}")
                self.tensor_names.update(weight_map.keys())
        else:
            self.tensor_names = tensor_names_from_parts
            weight_map = {}

        for part_name in self.part_names:
            logger.info(f"gguf: loading model part '{part_name}'")
            ctx: ContextManager[Any]
            if self.is_safetensors:
                from safetensors import safe_open
                ctx = cast(ContextManager[Any], safe_open(self.dir_model / part_name, framework="pt", device="cpu"))
            else:
                ctx = contextlib.nullcontext(torch.load(str(self.dir_model / part_name), map_location="cpu", mmap=True, weights_only=True))

            with ctx as model_part:
                tensor_names_from_parts.update(model_part.keys())

                for name in model_part.keys():
                    if self.is_safetensors:
                        if self.lazy:
                            data = model_part.get_slice(name)
                            data = LazyTorchTensor.from_safetensors_slice(data)
                        else:
                            data = model_part.get_tensor(name)
                    else:
                        data = model_part[name]
                        if self.lazy:
                            data = LazyTorchTensor.from_eager(data)
                    yield name, data

        # verify tensor name presence and identify potentially missing files
        if len(tensor_names_from_parts.symmetric_difference(self.tensor_names)) > 0:
            missing = sorted(self.tensor_names.difference(tensor_names_from_parts))
            extra = sorted(tensor_names_from_parts.difference(self.tensor_names))
            missing_files = sorted(set(weight_map[n] for n in missing if n in weight_map))
            if len(extra) == 0 and len(missing_files) > 0:
                raise ValueError(f"Missing or incomplete model files: {missing_files}")
            else:
                raise ValueError("Mismatch between weight map and model parts for tensor names:\n"
                                 f"Missing tensors: {missing}\n"
                                 f"Extra tensors: {extra}")

    def format_tensor_name(self, key: gguf.MODEL_TENSOR, bid: int | None = None, suffix: str = ".weight") -> str:
        if key not in gguf.MODEL_TENSORS[self.model_arch]:
            raise ValueError(f"Missing {key!r} for MODEL_TENSORS of {self.model_arch!r}")
        name: str = gguf.TENSOR_NAMES[key]
        if "{bid}" in name:
            assert bid is not None
            name = name.format(bid=bid)
        return name + suffix

    def match_model_tensor_name(self, name: str, key: gguf.MODEL_TENSOR, bid: int | None, suffix: str = ".weight") -> bool:
        if key not in gguf.MODEL_TENSORS[self.model_arch]:
            return False
        key_name: str = gguf.TENSOR_NAMES[key]
        if "{bid}" in key_name:
            if bid is None:
                return False
            key_name = key_name.format(bid=bid)
        else:
            if bid is not None:
                return False
        return name == (key_name + suffix)

    def map_tensor_name(self, name: str, try_suffixes: Sequence[str] = (".weight", ".bias")) -> str:
        new_name = self.tensor_map.get_name(key=name, try_suffixes=try_suffixes)
        if new_name is None:
            raise ValueError(f"Can not map tensor {name!r}")
        return new_name
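
    # For example, with a LLaMA-style tensor map this turns
    # "model.layers.0.self_attn.q_proj.weight" into "blk.0.attn_q.weight"
    # (illustrative; the actual mapping table lives in gguf-py's tensor_mapping).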

    def set_gguf_parameters(self):
        self.gguf_writer.add_block_count(self.block_count)

        if (n_ctx := self.find_hparam(["max_position_embeddings", "n_ctx"], optional=True)) is not None:
            self.gguf_writer.add_context_length(n_ctx)
            logger.info(f"gguf: context length = {n_ctx}")

        n_embd = self.find_hparam(["hidden_size", "n_embd"])
        self.gguf_writer.add_embedding_length(n_embd)
        logger.info(f"gguf: embedding length = {n_embd}")

        if (n_ff := self.find_hparam(["intermediate_size", "n_inner"], optional=True)) is not None:
            self.gguf_writer.add_feed_forward_length(n_ff)
            logger.info(f"gguf: feed forward length = {n_ff}")

        n_head = self.find_hparam(["num_attention_heads", "n_head"])
        self.gguf_writer.add_head_count(n_head)
        logger.info(f"gguf: head count = {n_head}")

        if (n_head_kv := self.hparams.get("num_key_value_heads")) is not None:
            self.gguf_writer.add_head_count_kv(n_head_kv)
            logger.info(f"gguf: key-value head count = {n_head_kv}")

        if (rope_theta := self.hparams.get("rope_theta")) is not None:
            self.gguf_writer.add_rope_freq_base(rope_theta)
            logger.info(f"gguf: rope theta = {rope_theta}")
        if (f_rms_eps := self.hparams.get("rms_norm_eps")) is not None:
            self.gguf_writer.add_layer_norm_rms_eps(f_rms_eps)
            logger.info(f"gguf: rms norm epsilon = {f_rms_eps}")
        if (f_norm_eps := self.find_hparam(["layer_norm_eps", "layer_norm_epsilon", "norm_epsilon"], optional=True)) is not None:
            self.gguf_writer.add_layer_norm_eps(f_norm_eps)
            logger.info(f"gguf: layer norm epsilon = {f_norm_eps}")
        if (n_experts := self.hparams.get("num_local_experts")) is not None:
            self.gguf_writer.add_expert_count(n_experts)
            logger.info(f"gguf: expert count = {n_experts}")
        if (n_experts_used := self.hparams.get("num_experts_per_tok")) is not None:
            self.gguf_writer.add_expert_used_count(n_experts_used)
            logger.info(f"gguf: experts used count = {n_experts_used}")

        if (head_dim := self.hparams.get("head_dim")) is not None:
            self.gguf_writer.add_key_length(head_dim)
            self.gguf_writer.add_value_length(head_dim)

        self.gguf_writer.add_file_type(self.ftype)
        logger.info(f"gguf: file type = {self.ftype}")

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        return [(self.map_tensor_name(name), data_torch)]

    def tensor_force_quant(self, name: str, new_name: str, bid: int | None, n_dims: int) -> gguf.GGMLQuantizationType | bool:
        del name, new_name, bid, n_dims  # unused

        return False

    # some models need extra generated tensors (like rope_freqs)
    def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
        return ()
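
    # These three hooks are the main per-architecture customization points:
    # modify_tensors() may rename, split, or re-pack tensors (see GPTNeoXModel below,
    # which re-packs the fused QKV weight), and tensor_force_quant() can pin a
    # specific tensor to a quantization type regardless of the requested ftype.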

    def prepare_tensors(self):
        max_name_len = max(len(s) for _, s in self.tensor_map.mapping.values()) + len(".weight,")

        for name, data_torch in chain(self.generate_extra_tensors(), self.get_tensors()):
            # we don't need these
            if name.endswith((".attention.masked_bias", ".attention.bias", ".rotary_emb.inv_freq")):
                continue

            old_dtype = data_torch.dtype

            # convert any unsupported data types to float32
            if data_torch.dtype not in (torch.float16, torch.float32):
                data_torch = data_torch.to(torch.float32)

            # use the first number-like part of the tensor name as the block id
            bid = None
            for part in name.split("."):
                if part.isdecimal():
                    bid = int(part)
                    break

            for new_name, data_torch in (self.modify_tensors(data_torch, name, bid)):
                data = data_torch.squeeze().numpy()

                # if data ends up empty, it means data_torch was a scalar tensor -> restore
                if len(data.shape) == 0:
                    data = data_torch.numpy()

                n_dims = len(data.shape)
                data_qtype: gguf.GGMLQuantizationType | bool = self.tensor_force_quant(name, new_name, bid, n_dims)

                # Most of the codebase that takes in 1D tensors or norms only handles F32 tensors
                if n_dims <= 1 or new_name.endswith("_norm.weight"):
                    data_qtype = gguf.GGMLQuantizationType.F32

                # Conditions should closely match those in llama_model_quantize_internal in llama.cpp
                # Some tensor types are always in float32
                if data_qtype is False and (
                    any(
                        self.match_model_tensor_name(new_name, key, bid)
                        for key in (
                            gguf.MODEL_TENSOR.FFN_GATE_INP,
                            gguf.MODEL_TENSOR.POS_EMBD,
                            gguf.MODEL_TENSOR.TOKEN_TYPES,
                            gguf.MODEL_TENSOR.SSM_CONV1D,
                            gguf.MODEL_TENSOR.TIME_MIX_FIRST,
                            gguf.MODEL_TENSOR.TIME_MIX_W1,
                            gguf.MODEL_TENSOR.TIME_MIX_W2,
                            gguf.MODEL_TENSOR.TIME_MIX_DECAY_W1,
                            gguf.MODEL_TENSOR.TIME_MIX_DECAY_W2,
                        )
                    )
                    or not new_name.endswith(".weight")
                ):
                    data_qtype = gguf.GGMLQuantizationType.F32

                if data_qtype is False and any(
                    self.match_model_tensor_name(new_name, key, bid)
                    for key in (
                        gguf.MODEL_TENSOR.TOKEN_EMBD,
                        gguf.MODEL_TENSOR.OUTPUT,
                    )
                ):
                    if self.ftype in (
                        gguf.LlamaFileType.MOSTLY_TQ1_0,
                        gguf.LlamaFileType.MOSTLY_TQ2_0,
                    ):
                        # TODO: use Q4_K and Q6_K
                        data_qtype = gguf.GGMLQuantizationType.F16

                # No override (data_qtype is False), or wants to be quantized (data_qtype is True)
                if isinstance(data_qtype, bool):
                    if self.ftype == gguf.LlamaFileType.ALL_F32:
                        data_qtype = gguf.GGMLQuantizationType.F32
                    elif self.ftype == gguf.LlamaFileType.MOSTLY_F16:
                        data_qtype = gguf.GGMLQuantizationType.F16
                    elif self.ftype == gguf.LlamaFileType.MOSTLY_BF16:
                        data_qtype = gguf.GGMLQuantizationType.BF16
                    elif self.ftype == gguf.LlamaFileType.MOSTLY_Q8_0:
                        data_qtype = gguf.GGMLQuantizationType.Q8_0
                    elif self.ftype == gguf.LlamaFileType.MOSTLY_TQ1_0:
                        data_qtype = gguf.GGMLQuantizationType.TQ1_0
                    elif self.ftype == gguf.LlamaFileType.MOSTLY_TQ2_0:
                        data_qtype = gguf.GGMLQuantizationType.TQ2_0
                    else:
                        raise ValueError(f"Unknown file type: {self.ftype.name}")

                try:
                    data = gguf.quants.quantize(data, data_qtype)
                except gguf.QuantError as e:
                    logger.warning("%s, %s", e, "falling back to F16")
                    data_qtype = gguf.GGMLQuantizationType.F16
                    data = gguf.quants.quantize(data, data_qtype)

                shape = gguf.quant_shape_from_byte_shape(data.shape, data_qtype) if data.dtype == np.uint8 else data.shape

                # reverse shape to make it similar to the internal ggml dimension order
                shape_str = f"{{{', '.join(str(n) for n in reversed(shape))}}}"

                # n_dims is implicit in the shape
                logger.info(f"{f'%-{max_name_len}s' % f'{new_name},'} {old_dtype} --> {data_qtype.name}, shape = {shape_str}")

                self.gguf_writer.add_tensor(new_name, data, raw_dtype=data_qtype)
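
    # Quantization type above is decided in order of precedence: the tensor_force_quant()
    # override, then F32 for 1D/norm tensors and the always-F32 tensor list, then the
    # token-embedding/output special case for ternary file types, then the file-type
    # default, with a final fallback to F16 if quantizing a tensor fails.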

    def set_type(self):
        self.gguf_writer.add_type(gguf.GGUFType.MODEL)

    def prepare_metadata(self, vocab_only: bool):
        total_params, shared_params, expert_params, expert_count = self.gguf_writer.get_total_parameter_count()

        self.metadata = gguf.Metadata.load(self.metadata_override, self.dir_model_card, self.model_name, total_params)

        # Fallback to model directory name if metadata name is still missing
        if self.metadata.name is None:
            self.metadata.name = self.dir_model.name

        # Generate parameter weight class (useful for leader boards) if not yet determined
        if self.metadata.size_label is None and total_params > 0:
            self.metadata.size_label = gguf.size_label(total_params, shared_params, expert_params, expert_count)

        # Extract the encoding scheme from the file type name. e.g. 'gguf.LlamaFileType.MOSTLY_Q8_0' --> 'Q8_0'
        output_type: str = self.ftype.name.partition("_")[2]

        # Filename Output
        if self.fname_out.is_dir():
            # Generate default filename based on model specification and available metadata
            if not vocab_only:
                fname_default: str = gguf.naming_convention(self.metadata.name, self.metadata.basename, self.metadata.finetune, self.metadata.version, self.metadata.size_label, output_type, model_type="LoRA" if total_params < 0 else None)
            else:
                fname_default: str = gguf.naming_convention(self.metadata.name, self.metadata.basename, self.metadata.finetune, self.metadata.version, size_label=None, output_type=None, model_type="vocab")

            # Use the default filename
            self.fname_out = self.fname_out / f"{fname_default}.gguf"
        else:
            # Output path is a custom defined templated filename
            # Note: `not is_dir()` is used because `.is_file()` will not detect
            #       file template strings as it doesn't actually exist as a file
            # Process templated file name with the output ftype, useful with the "auto" ftype
            self.fname_out = self.fname_out.parent / gguf.fill_templated_filename(self.fname_out.name, output_type)

        self.set_type()

        logger.info("Set meta model")
        self.metadata.set_gguf_meta_model(self.gguf_writer)

        logger.info("Set model parameters")
        self.set_gguf_parameters()

        logger.info("Set model tokenizer")
        self.set_vocab()

        logger.info("Set model quantization version")
        self.gguf_writer.add_quantization_version(gguf.GGML_QUANT_VERSION)

    def write(self):
        self.prepare_tensors()
        self.prepare_metadata(vocab_only=False)
        self.gguf_writer.write_header_to_file(path=self.fname_out)
        self.gguf_writer.write_kv_data_to_file()
        self.gguf_writer.write_tensors_to_file(progress=True)
        self.gguf_writer.close()

    def write_vocab(self):
        if len(self.gguf_writer.tensors) != 1:
            raise ValueError('Splitting the vocabulary is not supported')

        self.prepare_metadata(vocab_only=True)
        self.gguf_writer.write_header_to_file(path=self.fname_out)
        self.gguf_writer.write_kv_data_to_file()
        self.gguf_writer.close()

    @staticmethod
    def get_model_part_names(dir_model: Path, prefix: str, suffix: str) -> list[str]:
        part_names: list[str] = []
        for filename in os.listdir(dir_model):
            if filename.startswith(prefix) and filename.endswith(suffix):
                part_names.append(filename)

        part_names.sort()

        return part_names

    @staticmethod
    def load_hparams(dir_model: Path):
        with open(dir_model / "config.json", "r", encoding="utf-8") as f:
            return json.load(f)

    @classmethod
    def register(cls, *names: str) -> Callable[[AnyModel], AnyModel]:
        assert names

        def func(modelcls: AnyModel) -> AnyModel:
            for name in names:
                cls._model_classes[name] = modelcls
            return modelcls
        return func

    @classmethod
    def from_model_architecture(cls, arch: str) -> type[Model]:
        try:
            return cls._model_classes[arch]
        except KeyError:
            raise NotImplementedError(f'Architecture {arch!r} not supported!') from None
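
    # Registration example (this is exactly how the subclasses below hook in):
    #   @Model.register("GPTNeoXForCausalLM")
    #   class GPTNeoXModel(Model): ...
    # Model.from_model_architecture("GPTNeoXForCausalLM") then resolves to GPTNeoXModel.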

    def does_token_look_special(self, token: str | bytes) -> bool:
        if isinstance(token, (bytes, bytearray)):
            token_text = token.decode(encoding="utf-8")
        elif isinstance(token, memoryview):
            token_text = token.tobytes().decode(encoding="utf-8")
        else:
            token_text = token

        # Some models mark some added tokens which ought to be control tokens as not special.
        # (e.g. command-r, command-r-plus, deepseek-coder, gemma{,-2})
        seems_special = token_text in (
            "<pad>",  # deepseek-coder
            "<mask>", "<2mass>", "[@BOS@]",  # gemma{,-2}
        )

        seems_special = seems_special or (token_text.startswith("<|") and token_text.endswith("|>"))
        seems_special = seems_special or (token_text.startswith("<｜") and token_text.endswith("｜>"))  # deepseek-coder

        # TODO: should these be marked as UNUSED instead? (maybe not)
        seems_special = seems_special or (token_text.startswith("<unused") and token_text.endswith(">"))  # gemma{,-2}

        return seems_special

    # used for GPT-2 BPE and WordPiece vocabs
    def get_vocab_base(self) -> tuple[list[str], list[int], str]:
        tokens: list[str] = []
        toktypes: list[int] = []

        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(self.dir_model)
        vocab_size = self.hparams.get("vocab_size", len(tokenizer.vocab))
        assert max(tokenizer.vocab.values()) < vocab_size

        tokpre = self.get_vocab_base_pre(tokenizer)

        reverse_vocab = {id_: encoded_tok for encoded_tok, id_ in tokenizer.vocab.items()}
        added_vocab = tokenizer.get_added_vocab()

        for i in range(vocab_size):
            if i not in reverse_vocab:
                tokens.append(f"[PAD{i}]")
                toktypes.append(gguf.TokenType.UNUSED)
            else:
                token: str = reverse_vocab[i]
                if token in added_vocab:
                    if tokenizer.added_tokens_decoder[i].special or self.does_token_look_special(token):
                        toktypes.append(gguf.TokenType.CONTROL)
                    else:
                        token = token.replace(b"\xe2\x96\x81".decode("utf-8"), " ")  # pre-normalize user-defined spaces
                        toktypes.append(gguf.TokenType.USER_DEFINED)
                else:
                    toktypes.append(gguf.TokenType.NORMAL)
                tokens.append(token)

        return tokens, toktypes, tokpre
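
    # The b"\xe2\x96\x81" bytes above decode to "▁" (U+2581), the marker SentencePiece-style
    # vocabs use for a leading space, hence the "pre-normalize user-defined spaces" replace.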

    # NOTE: this function is generated by convert_hf_to_gguf_update.py
    #       do not modify it manually!
    # ref:  https://github.com/ggerganov/llama.cpp/pull/6920
    # Marker: Start get_vocab_base_pre
    def get_vocab_base_pre(self, tokenizer) -> str:
        # encoding this string and hashing the resulting tokens would (hopefully) give us a unique identifier that
        # is specific for the BPE pre-tokenizer used by the model
        # we will use this unique identifier to write a "tokenizer.ggml.pre" entry in the GGUF file which we can
        # use in llama.cpp to implement the same pre-tokenizer

        chktxt = '\n \n\n \n\n\n \t \t\t \t\n  \n   \n    \n     \n🚀 (normal) 😶\u200d🌫️ (multiple emojis concatenated) ✅ 🦙🦙 3 33 333 3333 33333 333333 3333333 33333333 3.3 3..3 3...3 កាន់តែពិសេសអាច😁 ?我想在apple工作1314151天～ ------======= нещо на Български \'\'\'\'\'\'```````""""......!!!!!!?????? I\'ve been \'told he\'s there, \'RE you sure? \'M not sure I\'ll make it, \'D you like some tea? We\'Ve a\'lL'

        chktok = tokenizer.encode(chktxt)
        chkhsh = sha256(str(chktok).encode()).hexdigest()

        logger.debug(f"chktok: {chktok}")
        logger.debug(f"chkhsh: {chkhsh}")

        res = None

        # NOTE: if you get an error here, you need to update the convert_hf_to_gguf_update.py script
        #       or pull the latest version of the model from Huggingface
        #       don't edit the hashes manually!
        if chkhsh == "0ef9807a4087ebef797fc749390439009c3b9eda9ad1a097abbe738f486c01e5":
            # ref: https://huggingface.co/meta-llama/Meta-Llama-3-8B
            res = "llama-bpe"
        if chkhsh == "049ecf7629871e3041641907f3de7c733e4dbfdc736f57d882ba0b0845599754":
            # ref: https://huggingface.co/deepseek-ai/deepseek-llm-7b-base
            res = "deepseek-llm"
        if chkhsh == "347715f544604f9118bb75ed199f68779f423cabb20db6de6f31b908d04d7821":
            # ref: https://huggingface.co/deepseek-ai/deepseek-coder-6.7b-base
            res = "deepseek-coder"
        if chkhsh == "8aeee3860c56296a157a1fe2fad249ec40aa59b1bb5709f4ade11c4e6fe652ed":
            # ref: https://huggingface.co/tiiuae/falcon-7b
            res = "falcon"
        if chkhsh == "0876d13b50744004aa9aeae05e7b0647eac9d801b5ba4668afc01e709c15e19f":
            # ref: https://huggingface.co/BAAI/bge-small-en-v1.5
            res = "bert-bge"
        if chkhsh == "8e62295832751ca1e8f92f2226f403dea30dc5165e448b5bfa05af5340c64ec7":
            # ref: https://huggingface.co/BAAI/bge-large-zh-v1.5
            res = "bert-bge-large"
        if chkhsh == "b6dc8df998e1cfbdc4eac8243701a65afe638679230920b50d6f17d81c098166":
            # ref: https://huggingface.co/mosaicml/mpt-7b
            res = "mpt"
        if chkhsh == "35d91631860c815f952d711435f48d356ebac988362536bed955d43bfa436e34":
            # ref: https://huggingface.co/bigcode/starcoder2-3b
            res = "starcoder"
        if chkhsh == "3ce83efda5659b07b1ad37ca97ca5797ea4285d9b9ab0dc679e4a720c9da7454":
            # ref: https://huggingface.co/openai-community/gpt2
            res = "gpt-2"
        if chkhsh == "32d85c31273f8019248f2559fed492d929ea28b17e51d81d3bb36fff23ca72b3":
            # ref: https://huggingface.co/stabilityai/stablelm-2-zephyr-1_6b
            res = "stablelm2"
        if chkhsh == "6221ad2852e85ce96f791f476e0b390cf9b474c9e3d1362f53a24a06dc8220ff":
            # ref: https://huggingface.co/smallcloudai/Refact-1_6-base
            res = "refact"
        if chkhsh == "9c2227e4dd922002fb81bde4fc02b0483ca4f12911410dee2255e4987644e3f8":
            # ref: https://huggingface.co/CohereForAI/c4ai-command-r-v01
            res = "command-r"
        if chkhsh == "e636dc30a262dcc0d8c323492e32ae2b70728f4df7dfe9737d9f920a282b8aea":
            # ref: https://huggingface.co/Qwen/Qwen1.5-7B
            res = "qwen2"
        if chkhsh == "b6dc8df998e1cfbdc4eac8243701a65afe638679230920b50d6f17d81c098166":
            # ref: https://huggingface.co/allenai/OLMo-1.7-7B-hf
            res = "olmo"
        if chkhsh == "a8594e3edff7c29c003940395316294b2c623e09894deebbc65f33f1515df79e":
            # ref: https://huggingface.co/databricks/dbrx-base
            res = "dbrx"
        if chkhsh == "c7699093ba4255a91e702aa38a596aa81669f3525dae06c2953267dde580f448":
            # ref: https://huggingface.co/jinaai/jina-reranker-v1-tiny-en
            res = "jina-v1-en"
        if chkhsh == "0876d13b50744004aa9aeae05e7b0647eac9d801b5ba4668afc01e709c15e19f":
            # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-en
            res = "jina-v2-en"
        if chkhsh == "171aeeedd6fb548d418a7461d053f11b6f1f1fc9b387bd66640d28a4b9f5c643":
            # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-es
            res = "jina-v2-es"
        if chkhsh == "27949a2493fc4a9f53f5b9b029c82689cfbe5d3a1929bb25e043089e28466de6":
            # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-de
            res = "jina-v2-de"
        if chkhsh == "c136ed14d01c2745d4f60a9596ae66800e2b61fa45643e72436041855ad4089d":
            # ref: https://huggingface.co/abacusai/Smaug-Llama-3-70B-Instruct
            res = "smaug-bpe"
        if chkhsh == "c7ea5862a53e4272c035c8238367063e2b270d51faa48c0f09e9d5b54746c360":
            # ref: https://huggingface.co/LumiOpen/Poro-34B-chat
            res = "poro-chat"
        if chkhsh == "7967bfa498ade6b757b064f31e964dddbb80f8f9a4d68d4ba7998fcf281c531a":
            # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-code
            res = "jina-v2-code"
        if chkhsh == "b6e8e1518dc4305be2fe39c313ed643381c4da5db34a98f6a04c093f8afbe99b":
            # ref: https://huggingface.co/THUDM/glm-4-9b-chat
            res = "chatglm-bpe"
        if chkhsh == "7fc505bd3104ca1083b150b17d088b59534ede9bde81f0dd2090967d7fe52cee":
            # ref: https://huggingface.co/LumiOpen/Viking-7B
            res = "viking"
        if chkhsh == "b53802fb28e26d645c3a310b34bfe07da813026ec7c7716883404d5e0f8b1901":
            # ref: https://huggingface.co/core42/jais-13b
            res = "jais"
        if chkhsh == "7b3e7548e4308f52a76e8229e4e6cc831195d0d1df43aed21ac6c93da05fec5f":
            # ref: https://huggingface.co/WisdomShell/CodeShell-7B
            res = "codeshell"
        if chkhsh == "63b97e4253352e6f357cc59ea5b583e3a680eaeaf2632188c2b952de2588485e":
            # ref: https://huggingface.co/mistralai/Mistral-Nemo-Base-2407
            res = "tekken"
        if chkhsh == "855059429035d75a914d1eda9f10a876752e281a054a7a3d421ef0533e5b6249":
            # ref: https://huggingface.co/HuggingFaceTB/SmolLM-135M
            res = "smollm"
        if chkhsh == "3c30d3ad1d6b64202cd222813e7736c2db6e1bd6d67197090fc1211fbc612ae7":
            # ref: https://huggingface.co/bigscience/bloom
            res = "bloom"
        if chkhsh == "bc01ce58980e1db43859146dc51b1758b3b88729b217a74792e9f8d43e479d21":
            # ref: https://huggingface.co/TurkuNLP/gpt3-finnish-small
            res = "gpt3-finnish"
        if chkhsh == "4e2b24cc4770243d65a2c9ec19770a72f08cffc161adbb73fcbb6b7dd45a0aae":
            # ref: https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct
            res = "exaone"
        if chkhsh == "fcace8b9cac38ce847670c970cd5892031a753a1ef381abd1d9af00f713da085":
            # ref: https://huggingface.co/microsoft/phi-2
            res = "phi-2"
        if chkhsh == "60824e3c0d9401f89943cbb2fff727f0e2d4c545ba4df2d6e4f09a6db0f5b450":
            # ref: https://huggingface.co/facebook/chameleon-7b
            res = "chameleon"
        if chkhsh == "1431a23e583c97432bc230bff598d103ddb5a1f89960c8f1d1051aaa944d0b35":
            # ref: https://huggingface.co/sapienzanlp/Minerva-7B-base-v1.0
            res = "minerva-7b"
        if chkhsh == "8b5a93ed704057481f240da0be7e7dca721d7f8f4755263b6807227a2cbeae65":
            # ref: https://huggingface.co/sentence-transformers/stsb-roberta-base
            res = "roberta-bpe"

        if res is None:
            logger.warning("\n")
            logger.warning("**************************************************************************************")
            logger.warning("** WARNING: The BPE pre-tokenizer was not recognized!")
            logger.warning("**          There are 2 possible reasons for this:")
            logger.warning("**          - the model has not been added to convert_hf_to_gguf_update.py yet")
            logger.warning("**          - the pre-tokenization config has changed upstream")
            logger.warning("**          Check your model files and convert_hf_to_gguf_update.py and update them accordingly.")
            logger.warning("** ref:     https://github.com/ggerganov/llama.cpp/pull/6920")
            logger.warning("**")
            logger.warning(f"** chkhsh:  {chkhsh}")
            logger.warning("**************************************************************************************")
            logger.warning("\n")
            raise NotImplementedError("BPE pre-tokenizer was not recognized - update get_vocab_base_pre()")

        logger.debug(f"tokenizer.ggml.pre: {repr(res)}")
        logger.debug(f"chkhsh: {chkhsh}")

        return res
    # Marker: End get_vocab_base_pre
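
    # To support a new pre-tokenizer, re-run convert_hf_to_gguf_update.py so it regenerates
    # the hash chain above; editing the hashes by hand defeats the consistency check.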

    def _set_vocab_gpt2(self) -> None:
        tokens, toktypes, tokpre = self.get_vocab_base()
        self.gguf_writer.add_tokenizer_model("gpt2")
        self.gguf_writer.add_tokenizer_pre(tokpre)
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=True)
        special_vocab.add_to_gguf(self.gguf_writer)

    def _set_vocab_qwen(self):
        dir_model = self.dir_model
        hparams = self.hparams
        tokens: list[str] = []
        toktypes: list[int] = []

        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(dir_model, trust_remote_code=True)
        vocab_size = hparams["vocab_size"]
        assert max(tokenizer.get_vocab().values()) < vocab_size

        tokpre = self.get_vocab_base_pre(tokenizer)

        merges = []
        vocab = {}
        mergeable_ranks = tokenizer.mergeable_ranks
        for token, rank in mergeable_ranks.items():
            vocab[QwenModel.token_bytes_to_string(token)] = rank
            if len(token) == 1:
                continue
            merged = QwenModel.bpe(mergeable_ranks, token, max_rank=rank)
            assert len(merged) == 2
            merges.append(' '.join(map(QwenModel.token_bytes_to_string, merged)))

        # for this kind of tokenizer, added_vocab is not a subset of vocab, so they need to be combined
        added_vocab = tokenizer.special_tokens
        reverse_vocab = {id_ : encoded_tok for encoded_tok, id_ in {**vocab, **added_vocab}.items()}

        for i in range(vocab_size):
            if i not in reverse_vocab:
                tokens.append(f"[PAD{i}]")
                toktypes.append(gguf.TokenType.UNUSED)
            elif reverse_vocab[i] in added_vocab:
                tokens.append(reverse_vocab[i])
                toktypes.append(gguf.TokenType.CONTROL)
            else:
                tokens.append(reverse_vocab[i])
                toktypes.append(gguf.TokenType.NORMAL)

        self.gguf_writer.add_tokenizer_model("gpt2")
        self.gguf_writer.add_tokenizer_pre(tokpre)
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(dir_model, load_merges=False)
        special_vocab.merges = merges
        # only add special tokens when they were not already loaded from config.json
        if len(special_vocab.special_token_ids) == 0:
            special_vocab._set_special_token("bos", tokenizer.special_tokens["<|endoftext|>"])
            special_vocab._set_special_token("eos", tokenizer.special_tokens["<|endoftext|>"])
        # this one is usually not in config.json anyway
        special_vocab._set_special_token("unk", tokenizer.special_tokens["<|endoftext|>"])
        special_vocab.add_to_gguf(self.gguf_writer)

    def _set_vocab_sentencepiece(self, add_to_gguf=True):
        tokens, scores, toktypes = self._create_vocab_sentencepiece()

        self.gguf_writer.add_tokenizer_model("llama")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

    def _create_vocab_sentencepiece(self):
        from sentencepiece import SentencePieceProcessor

        tokenizer_path = self.dir_model / 'tokenizer.model'

        if not tokenizer_path.is_file():
            raise FileNotFoundError(f"File not found: {tokenizer_path}")

        tokenizer = SentencePieceProcessor()
        tokenizer.LoadFromFile(str(tokenizer_path))

        vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())

        tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
        scores: list[float] = [-10000.0] * vocab_size
        toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size

        for token_id in range(tokenizer.vocab_size()):
            piece = tokenizer.IdToPiece(token_id)
            text = piece.encode("utf-8")
            score = tokenizer.GetScore(token_id)

            toktype = SentencePieceTokenTypes.NORMAL
            if tokenizer.IsUnknown(token_id):
                toktype = SentencePieceTokenTypes.UNKNOWN
            elif tokenizer.IsControl(token_id):
                toktype = SentencePieceTokenTypes.CONTROL
            elif tokenizer.IsUnused(token_id):
                toktype = SentencePieceTokenTypes.UNUSED
            elif tokenizer.IsByte(token_id):
                toktype = SentencePieceTokenTypes.BYTE

            tokens[token_id] = text
            scores[token_id] = score
            toktypes[token_id] = toktype

        added_tokens_file = self.dir_model / 'added_tokens.json'
        if added_tokens_file.is_file():
            with open(added_tokens_file, "r", encoding="utf-8") as f:
                added_tokens_json = json.load(f)
                for key in added_tokens_json:
                    token_id = added_tokens_json[key]
                    if token_id >= vocab_size:
                        logger.warning(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
                        continue

                    tokens[token_id] = key.encode("utf-8")
                    scores[token_id] = -1000.0
                    toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED

        tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
        if tokenizer_config_file.is_file():
            with open(tokenizer_config_file, "r", encoding="utf-8") as f:
                tokenizer_config_json = json.load(f)
                added_tokens_decoder = tokenizer_config_json.get("added_tokens_decoder", {})
                for token_id, token_data in added_tokens_decoder.items():
                    token_id = int(token_id)
                    token: str = token_data["content"]
                    if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
                        if tokens[token_id] != token.encode("utf-8"):
                            logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token!r}')
                    if token_data.get("special") or self.does_token_look_special(token):
                        toktypes[token_id] = SentencePieceTokenTypes.CONTROL
                    else:
                        token = token.replace(b"\xe2\x96\x81".decode("utf-8"), " ")  # pre-normalize user-defined spaces
                        toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED

                    scores[token_id] = -1000.0
                    tokens[token_id] = token.encode("utf-8")

        if vocab_size > len(tokens):
            pad_count = vocab_size - len(tokens)
            logger.debug(f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]")
            for i in range(1, pad_count + 1):
                tokens.append(bytes(f"[PAD{i}]", encoding="utf-8"))
                scores.append(-1000.0)
                toktypes.append(SentencePieceTokenTypes.UNUSED)

        return tokens, scores, toktypes
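
    # Precedence here: pieces from tokenizer.model are loaded first, entries from
    # added_tokens.json override matching ids, and tokenizer_config.json's
    # "added_tokens_decoder" overrides both; any remaining gap is padded with [PADn].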

    def _set_vocab_llama_hf(self):
        vocab = gguf.LlamaHfVocab(self.dir_model)
        tokens = []
        scores = []
        toktypes = []

        for text, score, toktype in vocab.all_tokens():
            tokens.append(text)
            scores.append(score)
            toktypes.append(toktype)

        assert len(tokens) == vocab.vocab_size

        self.gguf_writer.add_tokenizer_model("llama")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

    def _set_vocab_builtin(self, model_name: Literal["gpt-neox", "llama-spm"], vocab_size: int):
        tokenizer_path = Path(sys.path[0]) / "models" / f"ggml-vocab-{model_name}.gguf"
        logger.warning(f"Using tokenizer from '{os.path.relpath(tokenizer_path, os.getcwd())}'")
        vocab_reader = gguf.GGUFReader(tokenizer_path, "r")

        default_pre = "mpt" if model_name == "gpt-neox" else "default"

        field = vocab_reader.get_field(gguf.Keys.Tokenizer.MODEL)
        assert field  # tokenizer model
        self.gguf_writer.add_tokenizer_model(bytes(field.parts[-1]).decode("utf-8"))

        field = vocab_reader.get_field(gguf.Keys.Tokenizer.PRE)
        self.gguf_writer.add_tokenizer_pre(bytes(field.parts[-1]).decode("utf-8") if field else default_pre)

        field = vocab_reader.get_field(gguf.Keys.Tokenizer.LIST)
        assert field  # token list
        self.gguf_writer.add_token_list([bytes(field.parts[i]) for i in field.data][:vocab_size])

        if model_name == "llama-spm":
            field = vocab_reader.get_field(gguf.Keys.Tokenizer.SCORES)
            assert field  # token scores
            self.gguf_writer.add_token_scores([field.parts[i].tolist()[0] for i in field.data][:vocab_size])

        field = vocab_reader.get_field(gguf.Keys.Tokenizer.TOKEN_TYPE)
        assert field  # token types
        self.gguf_writer.add_token_types([field.parts[i].tolist()[0] for i in field.data][:vocab_size])

        if model_name != "llama-spm":
            field = vocab_reader.get_field(gguf.Keys.Tokenizer.MERGES)
            assert field  # token merges
            self.gguf_writer.add_token_merges([bytes(field.parts[i]) for i in field.data])

        if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.BOS_ID)) is not None:
            self.gguf_writer.add_bos_token_id(field.parts[-1].tolist()[0])
        if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.EOS_ID)) is not None:
            self.gguf_writer.add_eos_token_id(field.parts[-1].tolist()[0])
        if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.UNK_ID)) is not None:
            self.gguf_writer.add_unk_token_id(field.parts[-1].tolist()[0])
        if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.PAD_ID)) is not None:
            self.gguf_writer.add_pad_token_id(field.parts[-1].tolist()[0])
        if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.ADD_BOS)) is not None:
            self.gguf_writer.add_add_bos_token(field.parts[-1].tolist()[0])
        if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.ADD_EOS)) is not None:
            self.gguf_writer.add_add_eos_token(field.parts[-1].tolist()[0])
  761. @Model.register("GPTNeoXForCausalLM")
  762. class GPTNeoXModel(Model):
  763. model_arch = gguf.MODEL_ARCH.GPTNEOX
  764. def set_gguf_parameters(self):
  765. block_count = self.hparams["num_hidden_layers"]
  766. self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"])
  767. self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
  768. self.gguf_writer.add_block_count(block_count)
  769. self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
  770. self.gguf_writer.add_rope_dimension_count(
  771. int(self.hparams["rotary_pct"] * (self.hparams["hidden_size"] // self.hparams["num_attention_heads"])),
  772. )
  773. self.gguf_writer.add_head_count(self.hparams["num_attention_heads"])
  774. self.gguf_writer.add_parallel_residual(self.hparams.get("use_parallel_residual", True))
  775. self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_eps"])
  776. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  777. del bid # unused
  778. n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads"))
  779. n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed"))
  780. tensors: list[tuple[str, Tensor]] = []
  781. if re.match(r"gpt_neox\.layers\.\d+\.attention\.query_key_value\.weight", name):
  782. # Map bloom-style qkv_linear to gpt-style qkv_linear
  783. # bloom: https://github.com/huggingface/transformers/blob/main/src/transformers/models/bloom/modeling_bloom.py#L238-L252 # noqa
  784. # gpt-2: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt2/modeling_gpt2.py#L312 # noqa
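            # Illustrative example (added, not from upstream): with n_head = 2 the fused rows
            # [q0 k0 v0 q1 k1 v1] (grouped per head) become [q0 q1 k0 k1 v0 v1] (grouped per kind).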
            qkv_weights = data_torch.reshape((n_head, 3, n_embed // n_head, n_embed))
            data_torch = torch.cat(
                (
                    qkv_weights[:, 0, :, :].reshape((-1, n_embed)),
                    qkv_weights[:, 1, :, :].reshape((-1, n_embed)),
                    qkv_weights[:, 2, :, :].reshape((-1, n_embed)),
                ),
                dim=0,
            )
            logger.info("re-format attention.linear_qkv.weight")
        elif re.match(r"gpt_neox\.layers\.\d+\.attention\.query_key_value\.bias", name):
            qkv_bias = data_torch.reshape((n_head, 3, n_embed // n_head))
            data_torch = torch.cat(
                (
                    qkv_bias[:, 0, :].reshape((n_embed,)),
                    qkv_bias[:, 1, :].reshape((n_embed,)),
                    qkv_bias[:, 2, :].reshape((n_embed,)),
                ),
                dim=0,
            )
            logger.info("re-format attention.linear_qkv.bias")

        tensors.append((self.map_tensor_name(name), data_torch))

        return tensors
  808. @Model.register("BloomForCausalLM", "BloomModel")
  809. class BloomModel(Model):
  810. model_arch = gguf.MODEL_ARCH.BLOOM
  811. def set_gguf_parameters(self):
  812. n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed"))
  813. n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads"))
  814. self.gguf_writer.add_context_length(self.hparams.get("seq_length", n_embed))
  815. self.gguf_writer.add_embedding_length(n_embed)
  816. self.gguf_writer.add_feed_forward_length(4 * n_embed)
  817. self.gguf_writer.add_block_count(self.hparams["n_layer"])
  818. self.gguf_writer.add_head_count(n_head)
  819. self.gguf_writer.add_head_count_kv(n_head)
  820. self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
  821. self.gguf_writer.add_file_type(self.ftype)
  822. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  823. del bid # unused
  824. n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads"))
  825. n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed"))
  826. name = re.sub(r'transformer\.', '', name)
  827. tensors: list[tuple[str, Tensor]] = []
  828. if re.match(r"h\.\d+\.self_attention\.query_key_value\.weight", name):
  829. # Map bloom-style qkv_linear to gpt-style qkv_linear
  830. # bloom: https://github.com/huggingface/transformers/blob/main/src/transformers/models/bloom/modeling_bloom.py#L238-L252 # noqa
  831. # gpt-2: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt2/modeling_gpt2.py#L312 # noqa
  832. qkv_weights = data_torch.reshape((n_head, 3, n_embed // n_head, n_embed))
  833. data_torch = torch.cat(
  834. (
  835. qkv_weights[:, 0, :, :].reshape((-1, n_embed)),
  836. qkv_weights[:, 1, :, :].reshape((-1, n_embed)),
  837. qkv_weights[:, 2, :, :].reshape((-1, n_embed)),
  838. ),
  839. dim=0,
  840. )
  841. logger.info("re-format attention.linear_qkv.weight")
  842. elif re.match(r"h\.\d+\.self_attention\.query_key_value\.bias", name):
  843. qkv_bias = data_torch.reshape((n_head, 3, n_embed // n_head))
  844. data_torch = torch.cat(
  845. (
  846. qkv_bias[:, 0, :].reshape((n_embed,)),
  847. qkv_bias[:, 1, :].reshape((n_embed,)),
  848. qkv_bias[:, 2, :].reshape((n_embed,)),
  849. ),
  850. dim=0,
  851. )
  852. logger.info("re-format attention.linear_qkv.bias")
  853. tensors.append((self.map_tensor_name(name), data_torch))
  854. if name == "word_embeddings.weight":
  855. assert self.tensor_names is not None
  856. # TODO: tie them at runtime, don't duplicate in the model file
  857. if all(s not in self.tensor_names for s in ("lm_head.weight", "output.weight")):
  858. tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT), data_torch))
  859. return tensors
  860. @Model.register("MPTForCausalLM")
  861. class MPTModel(Model):
  862. model_arch = gguf.MODEL_ARCH.MPT
  863. def set_vocab(self):
  864. try:
  865. self._set_vocab_gpt2()
  866. except Exception:
  867. # Fallback for SEA-LION model
  868. self._set_vocab_sentencepiece()
  869. self.gguf_writer.add_add_bos_token(False)
  870. self.gguf_writer.add_pad_token_id(3)
  871. self.gguf_writer.add_eos_token_id(1)
  872. self.gguf_writer.add_unk_token_id(0)
  873. def set_gguf_parameters(self):
  874. block_count = self.hparams["n_layers"]
  875. self.gguf_writer.add_context_length(self.hparams["max_seq_len"])
  876. self.gguf_writer.add_embedding_length(self.hparams["d_model"])
  877. self.gguf_writer.add_block_count(block_count)
  878. self.gguf_writer.add_feed_forward_length(4 * self.hparams["d_model"])
  879. self.gguf_writer.add_head_count(self.hparams["n_heads"])
  880. if kv_n_heads := self.hparams["attn_config"].get("kv_n_heads"):
  881. self.gguf_writer.add_head_count_kv(kv_n_heads)
  882. self.gguf_writer.add_layer_norm_eps(1e-5)
  883. if self.hparams["attn_config"]["clip_qkv"] is not None:
  884. self.gguf_writer.add_clamp_kqv(self.hparams["attn_config"]["clip_qkv"])
  885. if self.hparams["attn_config"]["alibi"]:
  886. self.gguf_writer.add_max_alibi_bias(self.hparams["attn_config"]["alibi_bias_max"])
  887. else:
  888. self.gguf_writer.add_max_alibi_bias(0.0)
  889. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  890. del bid # unused
  891. if "scales" in name:
  892. new_name = self.map_tensor_name(name, try_suffixes=(".weight", ".bias", ".scales"))
  893. new_name = new_name.replace("scales", "act.scales")
  894. else:
  895. new_name = self.map_tensor_name(name, try_suffixes=(".weight", ".bias"))
  896. return [(new_name, data_torch)]
  897. @Model.register("OrionForCausalLM")
  898. class OrionModel(Model):
  899. model_arch = gguf.MODEL_ARCH.ORION
  900. def set_vocab(self):
  901. self._set_vocab_sentencepiece()
  902. def set_gguf_parameters(self):
  903. block_count = self.hparams["num_hidden_layers"]
  904. head_count = self.hparams["num_attention_heads"]
  905. head_count_kv = self.hparams.get("num_key_value_heads", head_count)
  906. ctx_length = 0
  907. if "max_sequence_length" in self.hparams:
  908. ctx_length = self.hparams["max_sequence_length"]
  909. elif "max_position_embeddings" in self.hparams:
  910. ctx_length = self.hparams["max_position_embeddings"]
  911. elif "model_max_length" in self.hparams:
  912. ctx_length = self.hparams["model_max_length"]
  913. else:
  914. raise ValueError("gguf: can not find ctx length parameter.")
  915. self.gguf_writer.add_file_type(self.ftype)
  916. self.gguf_writer.add_tensor_data_layout("Meta AI original pth")
  917. self.gguf_writer.add_context_length(ctx_length)
  918. self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
  919. self.gguf_writer.add_block_count(block_count)
  920. self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
  921. self.gguf_writer.add_head_count(head_count)
  922. self.gguf_writer.add_head_count_kv(head_count_kv)
  923. # note: config provides rms norm but it is actually layer norm
  924. # ref: https://huggingface.co/OrionStarAI/Orion-14B-Chat/blob/276a17221ce42beb45f66fac657a41540e71f4f5/modeling_orion.py#L570-L571
  925. self.gguf_writer.add_layer_norm_eps(self.hparams["rms_norm_eps"])
  926. @Model.register("BaichuanForCausalLM", "BaiChuanForCausalLM")
  927. class BaichuanModel(Model):
  928. model_arch = gguf.MODEL_ARCH.BAICHUAN
  929. def set_vocab(self):
  930. self._set_vocab_sentencepiece()
  931. def set_gguf_parameters(self):
  932. block_count = self.hparams["num_hidden_layers"]
  933. head_count = self.hparams["num_attention_heads"]
  934. head_count_kv = self.hparams.get("num_key_value_heads", head_count)
  935. ctx_length = 0
  936. if "max_sequence_length" in self.hparams:
  937. ctx_length = self.hparams["max_sequence_length"]
  938. elif "max_position_embeddings" in self.hparams:
  939. ctx_length = self.hparams["max_position_embeddings"]
  940. elif "model_max_length" in self.hparams:
  941. ctx_length = self.hparams["model_max_length"]
  942. else:
  943. raise ValueError("gguf: can not find ctx length parameter.")
  944. self.gguf_writer.add_tensor_data_layout("Meta AI original pth")
  945. self.gguf_writer.add_context_length(ctx_length)
  946. self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
  947. self.gguf_writer.add_block_count(block_count)
  948. self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
  949. self.gguf_writer.add_rope_dimension_count(self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
  950. self.gguf_writer.add_head_count(head_count)
  951. self.gguf_writer.add_head_count_kv(head_count_kv)
  952. self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
  953. self.gguf_writer.add_file_type(self.ftype)
  954. if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]:
  955. if self.hparams["rope_scaling"].get("type") == "linear":
  956. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
  957. self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"])
  958. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  959. head_count = self.hparams["num_attention_heads"]
  960. head_count_kv = self.hparams.get("num_key_value_heads", head_count)
  961. tensors: list[tuple[str, Tensor]] = []
  962. if bid is not None and name == f"model.layers.{bid}.self_attn.W_pack.weight":
  963. logger.info(f"Unpacking and permuting layer {bid}")
  964. tensors = [
  965. (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid),
  966. self._reverse_hf_permute_part(data_torch, 0, head_count, head_count)),
  967. (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid),
  968. self._reverse_hf_permute_part(data_torch, 1, head_count, head_count_kv)),
  969. (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid),
  970. self._reverse_hf_part(data_torch, 2)),
  971. ]
  972. else:
  973. tensors = [(self.map_tensor_name(name), data_torch)]
  974. return tensors
  975. def _reverse_hf_permute(self, weights: Tensor, n_head: int, n_kv_head: int | None = None) -> Tensor:
  976. if n_kv_head is not None and n_head != n_kv_head:
  977. n_head //= n_kv_head
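
        # Illustrative example (added, not from upstream): with head_dim = 4, HF stores a head's
        # rows in the order [0, 2, 1, 3]; the reshape + swapaxes below restores [0, 1, 2, 3].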
        return (
            weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
            .swapaxes(1, 2)
            .reshape(weights.shape)
        )

    def _reverse_hf_permute_part(
        self, weights: Tensor, n_part: int, n_head: int, n_head_kv: int | None = None,
    ) -> Tensor:
        r = weights.shape[0] // 3
        return self._reverse_hf_permute(weights[r * n_part:r * n_part + r, ...], n_head, n_head_kv)

    def _reverse_hf_part(self, weights: Tensor, n_part: int) -> Tensor:
        r = weights.shape[0] // 3
        return weights[r * n_part:r * n_part + r, ...]
  991. @Model.register("XverseForCausalLM")
  992. class XverseModel(Model):
  993. model_arch = gguf.MODEL_ARCH.XVERSE
  994. def set_vocab(self):
  995. assert (self.dir_model / "tokenizer.json").is_file()
  996. dir_model = self.dir_model
  997. hparams = self.hparams
  998. tokens: list[bytes] = []
  999. toktypes: list[int] = []
  1000. from transformers import AutoTokenizer
  1001. tokenizer = AutoTokenizer.from_pretrained(dir_model)
  1002. vocab_size = hparams.get("vocab_size", len(tokenizer.vocab))
  1003. # Since we are checking the maximum index, we need to ensure it's strictly less than vocab_size,
  1004. # because vocab_size is the count of items, and indexes start at 0.
  1005. max_vocab_index = max(tokenizer.get_vocab().values())
  1006. if max_vocab_index >= vocab_size:
  1007. raise ValueError("Vocabulary size exceeds expected maximum size.")
  1008. reverse_vocab: dict[int, str] = {id_: encoded_tok for encoded_tok, id_ in tokenizer.vocab.items()}
  1009. added_vocab = tokenizer.get_added_vocab()
  1010. for token_id in range(vocab_size):
  1011. token_text = reverse_vocab[token_id].encode('utf-8')
  1012. # replace "\x00" to string with length > 0
  1013. if token_text == b"\x00":
  1014. toktype = gguf.TokenType.BYTE # special
  1015. token_text = f"<{token_text}>".encode('utf-8')
  1016. elif re.fullmatch(br"<0x[0-9A-Fa-f]{2}>", token_text):
  1017. toktype = gguf.TokenType.BYTE # special
  1018. elif reverse_vocab[token_id] in added_vocab:
  1019. if tokenizer.added_tokens_decoder[token_id].special:
  1020. toktype = gguf.TokenType.CONTROL
  1021. else:
  1022. toktype = gguf.TokenType.USER_DEFINED
  1023. else:
  1024. toktype = gguf.TokenType.NORMAL
  1025. tokens.append(token_text)
  1026. toktypes.append(toktype)
  1027. self.gguf_writer.add_tokenizer_model("llama")
  1028. self.gguf_writer.add_tokenizer_pre("default")
  1029. self.gguf_writer.add_token_list(tokens)
  1030. self.gguf_writer.add_token_types(toktypes)
  1031. special_vocab = gguf.SpecialVocab(dir_model, n_vocab=len(tokens))
  1032. special_vocab.add_to_gguf(self.gguf_writer)
    def set_gguf_parameters(self):
        block_count = self.hparams["num_hidden_layers"]
        head_count = self.hparams["num_attention_heads"]
        head_count_kv = self.hparams.get("num_key_value_heads", head_count)

        ctx_length = 0
        if "max_sequence_length" in self.hparams:
            ctx_length = self.hparams["max_sequence_length"]
        elif "max_position_embeddings" in self.hparams:
            ctx_length = self.hparams["max_position_embeddings"]
        elif "model_max_length" in self.hparams:
            ctx_length = self.hparams["model_max_length"]
        else:
            raise ValueError("gguf: cannot find ctx length parameter.")

        self.gguf_writer.add_tensor_data_layout("Meta AI original pth")
        self.gguf_writer.add_context_length(ctx_length)
        self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
        self.gguf_writer.add_rope_dimension_count(self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
        self.gguf_writer.add_head_count(head_count)
        self.gguf_writer.add_head_count_kv(head_count_kv)
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
        self.gguf_writer.add_file_type(self.ftype)

        if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]:
            if self.hparams["rope_scaling"].get("type") == "linear":
                self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
                self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        head_count = self.hparams["num_attention_heads"]
        head_count_kv = self.hparams.get("num_key_value_heads", head_count)

        # HF models permute some of the tensors, so we need to undo that
        if name.endswith("q_proj.weight"):
            data_torch = self._reverse_hf_permute(data_torch, head_count, head_count)
        if name.endswith("k_proj.weight"):
            data_torch = self._reverse_hf_permute(data_torch, head_count, head_count_kv)

        return [(self.map_tensor_name(name), data_torch)]

    def _reverse_hf_permute(self, weights: Tensor, n_head: int, n_kv_head: int | None = None) -> Tensor:
        if n_kv_head is not None and n_head != n_kv_head:
            n_head //= n_kv_head

        return (
            weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
            .swapaxes(1, 2)
            .reshape(weights.shape)
        )
  1078. @Model.register("FalconForCausalLM", "RWForCausalLM")
  1079. class FalconModel(Model):
  1080. model_arch = gguf.MODEL_ARCH.FALCON
  1081. def set_gguf_parameters(self):
  1082. block_count = self.hparams.get("num_hidden_layers")
  1083. if block_count is None:
  1084. block_count = self.hparams["n_layer"] # old name
  1085. n_head = self.hparams.get("num_attention_heads")
  1086. if n_head is None:
  1087. n_head = self.hparams["n_head"] # old name
  1088. n_head_kv = self.hparams.get("num_kv_heads")
  1089. if n_head_kv is None:
  1090. n_head_kv = self.hparams.get("n_head_kv", 1) # old name
  1091. self.gguf_writer.add_context_length(2048) # not in config.json
  1092. self.gguf_writer.add_tensor_data_layout("jploski") # qkv tensor transform
  1093. self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
  1094. self.gguf_writer.add_feed_forward_length(4 * self.hparams["hidden_size"])
  1095. self.gguf_writer.add_block_count(block_count)
  1096. self.gguf_writer.add_head_count(n_head)
  1097. self.gguf_writer.add_head_count_kv(n_head_kv)
  1098. self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
  1099. self.gguf_writer.add_file_type(self.ftype)
  1100. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  1101. del bid # unused
  1102. # QKV tensor transform
  1103. # The original query_key_value tensor contains n_head_kv "kv groups",
  1104. # each consisting of n_head/n_head_kv query weights followed by one key
  1105. # and one value weight (shared by all query heads in the kv group).
  1106. # This layout makes it a big pain to work with in GGML.
  1107. # So we rearrange them here,, so that we have n_head query weights
  1108. # followed by n_head_kv key weights followed by n_head_kv value weights,
  1109. # in contiguous fashion.
  1110. # ref: https://github.com/jploski/ggml/blob/falcon40b/examples/falcon/convert-hf-to-ggml.py
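        # Illustrative example (added, not from upstream): with n_head = 4, n_head_kv = 2 the rows
        # go from [q0 q1 k0 v0 | q2 q3 k1 v1] (per kv group) to [q0 q1 q2 q3 | k0 k1 | v0 v1].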
  1111. if "query_key_value" in name:
  1112. n_head = self.find_hparam(["num_attention_heads", "n_head"])
  1113. n_head_kv = self.find_hparam(["num_kv_heads", "n_head_kv"], optional=True) or 1
  1114. head_dim = self.hparams["hidden_size"] // n_head
  1115. qkv = data_torch.view(n_head_kv, n_head // n_head_kv + 2, head_dim, head_dim * n_head)
  1116. q = qkv[:, :-2].reshape(n_head * head_dim, head_dim * n_head)
  1117. k = qkv[:, [-2]].reshape(n_head_kv * head_dim, head_dim * n_head)
  1118. v = qkv[:, [-1]].reshape(n_head_kv * head_dim, head_dim * n_head)
  1119. data_torch = torch.cat((q, k, v)).reshape_as(data_torch)
  1120. return [(self.map_tensor_name(name), data_torch)]
  1121. @Model.register("GPTBigCodeForCausalLM")
  1122. class StarCoderModel(Model):
  1123. model_arch = gguf.MODEL_ARCH.STARCODER
  1124. def set_gguf_parameters(self):
  1125. block_count = self.hparams["n_layer"]
  1126. self.gguf_writer.add_context_length(self.hparams["n_positions"])
  1127. self.gguf_writer.add_embedding_length(self.hparams["n_embd"])
  1128. self.gguf_writer.add_feed_forward_length(4 * self.hparams["n_embd"])
  1129. self.gguf_writer.add_block_count(block_count)
  1130. self.gguf_writer.add_head_count(self.hparams["n_head"])
  1131. self.gguf_writer.add_head_count_kv(1)
  1132. self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
  1133. self.gguf_writer.add_file_type(self.ftype)
  1134. @Model.register("GPTRefactForCausalLM")
  1135. class RefactModel(Model):
  1136. model_arch = gguf.MODEL_ARCH.REFACT
  1137. def set_vocab(self):
  1138. super().set_vocab()
  1139. # TODO: how to determine special FIM tokens automatically?
  1140. special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=False,
  1141. special_token_types = ['prefix', 'suffix', 'middle', 'eot'])
  1142. special_vocab._set_special_token("prefix", 1)
  1143. special_vocab._set_special_token("suffix", 3)
  1144. special_vocab._set_special_token("middle", 2)
  1145. special_vocab.chat_template = None # do not add it twice
  1146. special_vocab.add_to_gguf(self.gguf_writer)
  1147. def set_gguf_parameters(self):
  1148. hidden_dim = self.hparams["n_embd"]
  1149. inner_dim = 4 * hidden_dim
  1150. hidden_dim = int(2 * inner_dim / 3)
  1151. multiple_of = 256
  1152. ff_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of)
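        # LLaMA-style FFN sizing: start from 4 * n_embd, take 2/3 of it, then round up to a
        # multiple of 256. Illustrative example (added): n_embd = 768 -> 3072 -> 2048 -> ff_dim = 2048.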
  1153. block_count = self.hparams["n_layer"]
  1154. # refact uses Alibi. So this is from config.json which might be used by training.
  1155. self.gguf_writer.add_context_length(self.hparams["n_positions"])
  1156. self.gguf_writer.add_embedding_length(self.hparams["n_embd"])
  1157. self.gguf_writer.add_feed_forward_length(ff_dim)
  1158. self.gguf_writer.add_block_count(block_count)
  1159. self.gguf_writer.add_head_count(self.hparams["n_head"])
  1160. self.gguf_writer.add_head_count_kv(1)
  1161. self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"])
  1162. self.gguf_writer.add_file_type(self.ftype)
  1163. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  1164. hidden_dim = self.hparams["n_embd"]
  1165. inner_dim = 4 * hidden_dim
  1166. hidden_dim = int(2 * inner_dim / 3)
  1167. multiple_of = 256
  1168. ff_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of)
  1169. n_head = self.hparams["n_head"]
  1170. n_head_kv = 1
  1171. head_dim = self.hparams["n_embd"] // n_head
  1172. tensors: list[tuple[str, Tensor]] = []
  1173. if bid is not None:
  1174. if name == f"transformer.h.{bid}.attn.kv.weight":
  1175. tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid), data_torch[:n_head_kv * head_dim]))
  1176. tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid), data_torch[n_head_kv * head_dim:]))
  1177. elif name == f"transformer.h.{bid}.attn.q.weight":
  1178. tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid), data_torch))
  1179. elif name == f"transformer.h.{bid}.mlp.gate_up_proj.weight":
  1180. tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE, bid), data_torch[:ff_dim]))
  1181. tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP, bid), data_torch[ff_dim:]))
  1182. if len(tensors) == 0:
  1183. tensors.append((self.map_tensor_name(name), data_torch))
  1184. return tensors
  1185. @Model.register("StableLmForCausalLM", "StableLMEpochForCausalLM", "LlavaStableLMEpochForCausalLM")
  1186. class StableLMModel(Model):
  1187. model_arch = gguf.MODEL_ARCH.STABLELM
  1188. def set_vocab(self):
  1189. if (self.dir_model / "tokenizer.json").is_file():
  1190. self._set_vocab_gpt2()
  1191. else:
  1192. # StableLM 2 1.6B used to have a vocab in a similar format to Qwen's vocab
  1193. self._set_vocab_qwen()
  1194. def set_gguf_parameters(self):
  1195. hparams = self.hparams
  1196. block_count = hparams["num_hidden_layers"]
  1197. self.gguf_writer.add_context_length(hparams["max_position_embeddings"])
  1198. self.gguf_writer.add_embedding_length(hparams["hidden_size"])
  1199. self.gguf_writer.add_block_count(block_count)
  1200. self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
  1201. rotary_factor = self.find_hparam(["partial_rotary_factor", "rope_pct"])
  1202. self.gguf_writer.add_rope_dimension_count(int(rotary_factor * (hparams["hidden_size"] // hparams["num_attention_heads"])))
  1203. self.gguf_writer.add_head_count(hparams["num_attention_heads"])
  1204. self.gguf_writer.add_head_count_kv(hparams["num_key_value_heads"])
  1205. self.gguf_writer.add_parallel_residual(hparams["use_parallel_residual"] if "use_parallel_residual" in hparams else True)
  1206. self.gguf_writer.add_layer_norm_eps(self.find_hparam(["layer_norm_eps", "norm_eps"]))
  1207. self.gguf_writer.add_file_type(self.ftype)
  1208. _q_norms: list[dict[str, Tensor]] | None = None
  1209. _k_norms: list[dict[str, Tensor]] | None = None
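
    # Note (comment added for clarity): some StableLM variants keep one LayerNorm per attention
    # head for q and k; the per-head norms are buffered here and stacked into one tensor per block.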
    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        n_head = self.hparams["num_attention_heads"]
        n_kv_head = self.hparams["num_key_value_heads"]

        if name.find("q_layernorm.norms") != -1:
            assert bid is not None

            if self._q_norms is None:
                self._q_norms = [{} for _ in range(self.block_count)]

            self._q_norms[bid][name] = data_torch

            if len(self._q_norms[bid]) >= n_head:
                return self._stack_qk_norm(bid, n_head, self._q_norms[bid], "q_layernorm")
            else:
                return []

        if name.find("k_layernorm.norms") != -1:
            assert bid is not None

            if self._k_norms is None:
                self._k_norms = [{} for _ in range(self.block_count)]

            self._k_norms[bid][name] = data_torch

            if len(self._k_norms[bid]) >= n_kv_head:
                return self._stack_qk_norm(bid, n_kv_head, self._k_norms[bid], "k_layernorm")
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]

    def _stack_qk_norm(self, bid: int, n_head: int, norms: dict[str, Tensor], layer_name: str = "q_layernorm"):
        datas: list[Tensor] = []
        # extract the norms in order
        for xid in range(n_head):
            ename = f"model.layers.{bid}.self_attn.{layer_name}.norms.{xid}.weight"
            datas.append(norms[ename])
            del norms[ename]
        data_torch = torch.stack(datas, dim=0)

        merged_name = f"model.layers.{bid}.self_attn.{layer_name}.weight"
        new_name = self.map_tensor_name(merged_name)

        return [(new_name, data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._q_norms is not None or self._k_norms is not None:
            # flatten two `list[dict[str, Tensor]]` into a single `list[str]`
            norms = (
                [k for d in self._q_norms for k in d.keys()] if self._q_norms is not None else []
            ) + (
                [k for d in self._k_norms for k in d.keys()] if self._k_norms is not None else []
            )
            if len(norms) > 0:
                raise ValueError(f"Unprocessed norms: {norms}")
  1254. @Model.register("LLaMAForCausalLM", "LlamaForCausalLM", "MistralForCausalLM", "MixtralForCausalLM")
  1255. class LlamaModel(Model):
  1256. model_arch = gguf.MODEL_ARCH.LLAMA
  1257. def set_vocab(self):
  1258. try:
  1259. self._set_vocab_sentencepiece()
  1260. except FileNotFoundError:
  1261. try:
  1262. self._set_vocab_llama_hf()
  1263. except (FileNotFoundError, TypeError):
  1264. # Llama 3
  1265. self._set_vocab_gpt2()
  1266. # Apply to CodeLlama only (and ignore for Llama 3 with a vocab size of 128256)
  1267. if self.hparams.get("vocab_size", 32000) == 32016:
  1268. special_vocab = gguf.SpecialVocab(
  1269. self.dir_model, load_merges=False,
  1270. special_token_types = ['prefix', 'suffix', 'middle', 'eot']
  1271. )
  1272. special_vocab._set_special_token("prefix", 32007)
  1273. special_vocab._set_special_token("suffix", 32008)
  1274. special_vocab._set_special_token("middle", 32009)
  1275. special_vocab._set_special_token("eot", 32010)
  1276. special_vocab.add_to_gguf(self.gguf_writer)
  1277. tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
  1278. if tokenizer_config_file.is_file():
  1279. with open(tokenizer_config_file, "r", encoding="utf-8") as f:
  1280. tokenizer_config_json = json.load(f)
  1281. if "add_prefix_space" in tokenizer_config_json:
  1282. self.gguf_writer.add_add_space_prefix(tokenizer_config_json["add_prefix_space"])
  1283. # Apply to granite small models only
  1284. if self.hparams.get("vocab_size", 32000) == 49152:
  1285. self.gguf_writer.add_add_bos_token(False)
  1286. def set_gguf_parameters(self):
  1287. super().set_gguf_parameters()
  1288. hparams = self.hparams
  1289. self.gguf_writer.add_vocab_size(hparams["vocab_size"])
  1290. if "head_dim" in hparams:
  1291. rope_dim = hparams["head_dim"]
  1292. else:
  1293. rope_dim = hparams["hidden_size"] // hparams["num_attention_heads"]
  1294. self.gguf_writer.add_rope_dimension_count(rope_dim)
  1295. if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]:
  1296. if self.hparams["rope_scaling"].get("type") == "linear":
  1297. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
  1298. self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"])
  1299. @staticmethod
  1300. def permute(weights: Tensor, n_head: int, n_head_kv: int | None):
  1301. if n_head_kv is not None and n_head != n_head_kv:
  1302. n_head = n_head_kv
  1303. return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
  1304. .swapaxes(1, 2)
  1305. .reshape(weights.shape))
  1306. _experts: list[dict[str, Tensor]] | None = None
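
    # Note (comment added for clarity): Mixtral expert tensors arrive one (expert, projection)
    # pair at a time; they are buffered per block and merged into a single 3D tensor once all of
    # a block's experts have been seen.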
    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        n_head = self.hparams["num_attention_heads"]
        n_kv_head = self.hparams.get("num_key_value_heads")

        if name.endswith(("q_proj.weight", "q_proj.bias")):
            data_torch = LlamaModel.permute(data_torch, n_head, n_head)
        if name.endswith(("k_proj.weight", "k_proj.bias")):
            data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)

        # process the experts separately
        if name.find("block_sparse_moe.experts") != -1:
            n_experts = self.hparams["num_local_experts"]

            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for wid in ["w1", "w2", "w3"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.{wid}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)

                    merged_name = f"layers.{bid}.feed_forward.experts.{wid}.weight"

                    new_name = self.map_tensor_name(merged_name)

                    tensors.append((new_name, data_torch))
                return tensors
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]

    def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
        if rope_scaling := self.find_hparam(["rope_scaling"], optional=True):
            if rope_scaling.get("rope_type", '').lower() == "llama3":
                base = self.hparams.get("rope_theta", 10000.0)
                dim = self.hparams.get("head_dim", self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
                freqs = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim))

                factor = rope_scaling.get("factor", 8.0)
                low_freq_factor = rope_scaling.get("low_freq_factor", 1.0)
                high_freq_factor = rope_scaling.get("high_freq_factor", 4.0)
                old_context_len = self.hparams.get("original_max_position_embeddings", 8192)

                low_freq_wavelen = old_context_len / low_freq_factor
                high_freq_wavelen = old_context_len / high_freq_factor
                assert low_freq_wavelen != high_freq_wavelen
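
                # Per-frequency rule (comment added for clarity): wavelengths shorter than
                # high_freq_wavelen keep factor 1, wavelengths longer than low_freq_wavelen are
                # scaled by `factor`, and the band in between is smoothly interpolated.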
                rope_factors = []
                for freq in freqs:
                    wavelen = 2 * math.pi / freq
                    if wavelen < high_freq_wavelen:
                        rope_factors.append(1)
                    elif wavelen > low_freq_wavelen:
                        rope_factors.append(factor)
                    else:
                        smooth = (old_context_len / wavelen - low_freq_factor) / (high_freq_factor - low_freq_factor)
                        rope_factors.append(1 / ((1 - smooth) / factor + smooth))

                yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FREQS), torch.tensor(rope_factors, dtype=torch.float32))

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")
  1369. @Model.register("BitnetForCausalLM")
  1370. class BitnetModel(Model):
  1371. model_arch = gguf.MODEL_ARCH.BITNET
  1372. def set_vocab(self):
  1373. self._set_vocab_sentencepiece()
  1374. def set_gguf_parameters(self):
  1375. super().set_gguf_parameters()
  1376. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
  1377. self.gguf_writer.add_rope_scaling_factor(1.0)
  1378. def weight_quant(self, weight: Tensor) -> Tensor:
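        # Round-trip "fake" quantization (comment added for clarity): every weight is snapped to
        # one of {-scale, 0, +scale}, where scale is the mean absolute value of the tensor.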
        dtype = weight.dtype
        weight = weight.float()
        scale = weight.abs().mean().clamp(min=1e-5)
        iscale = 1 / scale
        # TODO: multiply by the scale directly instead of inverting it twice
        # (this is also unnecessarily doubly inverted upstream)
        # ref: https://huggingface.co/1bitLLM/bitnet_b1_58-3B/blob/af89e318d78a70802061246bf037199d2fb97020/utils_quant.py#L10
        result = (weight * iscale).round().clamp(-1, 1) / iscale
        return result.type(dtype)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        new_name = self.map_tensor_name(name)

        if any(self.match_model_tensor_name(new_name, key, bid) for key in [
            gguf.MODEL_TENSOR.ATTN_Q,
            gguf.MODEL_TENSOR.ATTN_K,
            gguf.MODEL_TENSOR.ATTN_V,
            gguf.MODEL_TENSOR.ATTN_OUT,
            gguf.MODEL_TENSOR.FFN_UP,
            gguf.MODEL_TENSOR.FFN_DOWN,
            gguf.MODEL_TENSOR.FFN_GATE,
        ]):
            # transform weight into 1/0/-1 (in fp32)
            data_torch = self.weight_quant(data_torch)

        yield (new_name, data_torch)
  1402. @Model.register("GrokForCausalLM")
  1403. class GrokModel(Model):
  1404. model_arch = gguf.MODEL_ARCH.GROK
  1405. def set_vocab(self):
  1406. self._set_vocab_sentencepiece()
  1407. def __init__(self, *args, **kwargs):
  1408. super().__init__(*args, **kwargs)
  1409. def set_gguf_parameters(self):
  1410. super().set_gguf_parameters()
  1411. _experts: list[dict[str, Tensor]] | None = None
  1412. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  1413. # process the experts separately
  1414. if name.find(".moe.") != -1:
  1415. n_experts = self.hparams["num_local_experts"]
  1416. assert bid is not None
  1417. if self._experts is None:
  1418. self._experts = [{} for _ in range(self.block_count)]
  1419. self._experts[bid][name] = data_torch
  1420. if len(self._experts[bid]) >= n_experts * 3:
  1421. tensors: list[tuple[str, Tensor]] = []
  1422. # merge the experts into a single 3d tensor
  1423. for wid in ["linear", "linear_1", "linear_v"]:
  1424. datas: list[Tensor] = []
  1425. for xid in range(n_experts):
  1426. ename = f"transformer.decoder_layer.{bid}.moe.{xid}.{wid}.weight"
  1427. datas.append(self._experts[bid][ename])
  1428. del self._experts[bid][ename]
  1429. data_torch = torch.stack(datas, dim=0)
  1430. merged_name = f"transformer.decoder_layer.{bid}.moe.{wid}.weight"
  1431. new_name = self.map_tensor_name(merged_name)
  1432. tensors.append((new_name, data_torch))
  1433. return tensors
  1434. else:
  1435. return []
  1436. return [(self.map_tensor_name(name), data_torch)]
  1437. @Model.register("DbrxForCausalLM")
  1438. class DbrxModel(Model):
  1439. model_arch = gguf.MODEL_ARCH.DBRX
  1440. def set_gguf_parameters(self):
  1441. ffn_config = self.hparams["ffn_config"]
  1442. attn_config = self.hparams["attn_config"]
  1443. self.gguf_writer.add_block_count(self.hparams["n_layers"])
  1444. self.gguf_writer.add_context_length(self.hparams["max_seq_len"])
  1445. self.gguf_writer.add_embedding_length(self.hparams["d_model"])
  1446. self.gguf_writer.add_feed_forward_length(ffn_config["ffn_hidden_size"])
  1447. self.gguf_writer.add_head_count(self.hparams["n_heads"])
  1448. self.gguf_writer.add_head_count_kv(attn_config["kv_n_heads"])
  1449. self.gguf_writer.add_rope_freq_base(attn_config["rope_theta"])
  1450. self.gguf_writer.add_clamp_kqv(attn_config["clip_qkv"])
  1451. self.gguf_writer.add_expert_count(ffn_config["moe_num_experts"])
  1452. self.gguf_writer.add_expert_used_count(ffn_config["moe_top_k"])
  1453. self.gguf_writer.add_layer_norm_eps(1e-5)
  1454. self.gguf_writer.add_file_type(self.ftype)
  1455. logger.info(f"gguf: file type = {self.ftype}")
  1456. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  1457. del bid # unused
  1458. n_expert = self.hparams["ffn_config"]["moe_num_experts"]
  1459. n_ff = self.hparams["ffn_config"]["ffn_hidden_size"]
  1460. n_embd = self.hparams["d_model"]
  1461. # Specific behavior for experts tensors: suffix .weight, view as 3D and transpose
  1462. # original implementation expects (n_expert, n_ff, n_embd) for all experts weights
  1463. # But llama.cpp moe graph works differently
  1464. # AND the dimensions in ggml are typically in the reverse order of the pytorch dimensions
  1465. # so (n_expert, n_ff, n_embd) in pytorch is {n_embd, n_ff, n_expert} in ggml_tensor
  1466. exp_tensor_names = {"ffn.experts.mlp.w1": None, # LLM_TENSOR_FFN_GATE_EXPS ggml_tensor->ne{n_embd, n_ff, n_expert}
  1467. "ffn.experts.mlp.w2": (0, 2, 1), # LLM_TENSOR_FFN_DOWN_EXPS ggml_tensor->ne{n_ff, n_embd, n_expert}
  1468. "ffn.experts.mlp.v1": None} # LLM_TENSOR_FFN_UP_EXPS ggml_tensor->ne{n_embd, n_ff, n_expert}
  1469. experts = False
  1470. for exp_tensor_name in exp_tensor_names.keys():
  1471. if name.find(exp_tensor_name) != -1 and name.find(".weight") == -1:
  1472. experts = True
  1473. data_torch = data_torch.view(n_expert, n_ff, n_embd)
  1474. if (permute_tensor := exp_tensor_names[exp_tensor_name]) is not None:
  1475. data_torch = data_torch.permute(*permute_tensor)
  1476. break
  1477. # map tensor names
  1478. # In MoE models the ffn tensors are typically most of the model weights,
  1479. # and need to be quantizable. Quantize expects tensor names to be suffixed by .weight.
  1480. # Every other model has the weight names ending in .weight,
  1481. # let's assume that is the convention which is not the case for dbrx:
  1482. # https://huggingface.co/databricks/dbrx-instruct/blob/main/model.safetensors.index.json#L15
  1483. new_name = self.map_tensor_name(name if not experts else name + ".weight", try_suffixes=(".weight",))
  1484. return [(new_name, data_torch)]
  1485. def tensor_force_quant(self, name: str, new_name: str, bid: int | None, n_dims: int) -> gguf.GGMLQuantizationType | bool:
  1486. del name, new_name, bid # unused
  1487. return n_dims > 1
  1488. @Model.register("MiniCPMForCausalLM")
  1489. class MiniCPMModel(Model):
  1490. model_arch = gguf.MODEL_ARCH.MINICPM
  1491. def set_gguf_parameters(self):
  1492. super().set_gguf_parameters()
  1493. embedding_scale = float(self.hparams["scale_emb"])
  1494. self.gguf_writer.add_embedding_scale(embedding_scale)
  1495. logger.info(f"gguf: (minicpm) embedding_scale = {embedding_scale}")
  1496. residual_scale = self.hparams["scale_depth"] / self.hparams["num_hidden_layers"] ** 0.5
  1497. self.gguf_writer.add_residual_scale(residual_scale)
  1498. logger.info(f"gguf: (minicpm) residual_scale = {residual_scale}")
  1499. logit_scale = self.hparams["hidden_size"] / self.hparams["dim_model_base"]
  1500. self.gguf_writer.add_logit_scale(logit_scale)
  1501. logger.info(f"gguf: (minicpm) logit_scale = {logit_scale}")
  1502. if self.hparams.get("rope_scaling") is not None:
  1503. if self.hparams["rope_scaling"].get("type") == "longrope":
  1504. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LONGROPE)
  1505. logger.info(f"gguf: (minicpm) rope_scaling_type = {gguf.RopeScalingType.LONGROPE}")
  1506. def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
  1507. rope_dims = self.hparams["hidden_size"] // self.hparams["num_attention_heads"]
  1508. rope_scaling = self.find_hparam(['rope_scaling'], True)
  1509. if rope_scaling is not None:
  1510. long_factors = rope_scaling.get('long_factor', None)
  1511. short_factors = rope_scaling.get('short_factor', None)
  1512. if long_factors is None or short_factors is None:
  1513. raise KeyError('Missing the required key rope_scaling.long_factor or rope_scaling_short_factor')
  1514. if len(long_factors) != len(short_factors) or len(long_factors) != rope_dims / 2:
  1515. raise ValueError(f'The length of rope long and short factors must be {rope_dims / 2}')
  1516. yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FACTORS_LONG), torch.tensor(long_factors, dtype=torch.float32))
  1517. yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FACTORS_SHORT), torch.tensor(short_factors, dtype=torch.float32))
  1518. def set_vocab(self):
  1519. self._set_vocab_sentencepiece()
  1520. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  1521. del bid # unused
  1522. n_head = self.hparams["num_attention_heads"]
  1523. n_kv_head = self.hparams.get("num_key_value_heads")
  1524. # HF models permute some of the tensors, so we need to undo that
  1525. if name.endswith(("q_proj.weight")):
  1526. data_torch = LlamaModel.permute(data_torch, n_head, n_head)
  1527. if name.endswith(("k_proj.weight")):
  1528. data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)
  1529. return [(self.map_tensor_name(name), data_torch)]
  1530. @Model.register("MiniCPM3ForCausalLM")
  1531. class MiniCPM3Model(Model):
  1532. model_arch = gguf.MODEL_ARCH.MINICPM3
  1533. def set_gguf_parameters(self):
  1534. hparams = self.hparams
  1535. self.gguf_writer.add_file_type(self.ftype)
  1536. self.gguf_writer.add_context_length(hparams["max_position_embeddings"])
  1537. self.gguf_writer.add_embedding_length(hparams["hidden_size"])
  1538. self.gguf_writer.add_block_count(self.block_count)
  1539. self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
  1540. self.gguf_writer.add_head_count(hparams["num_attention_heads"])
  1541. self.gguf_writer.add_head_count_kv(hparams["num_key_value_heads"])
  1542. self.gguf_writer.add_layer_norm_rms_eps(hparams["rms_norm_eps"])
  1543. self.gguf_writer.add_vocab_size(hparams["vocab_size"])
  1544. if "q_lora_rank" in hparams and hparams["q_lora_rank"] is not None:
  1545. self.gguf_writer.add_q_lora_rank(hparams["q_lora_rank"])
  1546. self.gguf_writer.add_kv_lora_rank(hparams["kv_lora_rank"])
  1547. self.gguf_writer.add_key_length(hparams["qk_nope_head_dim"] + hparams["qk_rope_head_dim"])
  1548. self.gguf_writer.add_rope_dimension_count(hparams["qk_rope_head_dim"])
  1549. def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
  1550. rope_scaling = self.find_hparam(['rope_scaling'], True)
  1551. if rope_scaling is not None:
  1552. rope_dims = self.hparams["qk_rope_head_dim"]
  1553. long_factors = rope_scaling.get('long_factor', None)
  1554. short_factors = rope_scaling.get('short_factor', None)
  1555. if long_factors is None or short_factors is None:
  1556. raise KeyError('Missing the required key rope_scaling.long_factor or rope_scaling_short_factor')
  1557. if len(long_factors) != len(short_factors) or len(long_factors) != rope_dims / 2:
  1558. raise ValueError(f'The length of rope long and short factors must be {rope_dims / 2}')
  1559. yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FACTORS_LONG), torch.tensor(long_factors, dtype=torch.float32))
  1560. yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FACTORS_SHORT), torch.tensor(short_factors, dtype=torch.float32))
  1561. def set_vocab(self):
  1562. self._set_vocab_sentencepiece()
  1563. def _reverse_hf_permute(self, weights: Tensor, n_head: int, n_kv_head: int | None = None) -> Tensor:
  1564. if n_kv_head is not None and n_head != n_kv_head:
  1565. n_head //= n_kv_head
  1566. return (
  1567. weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
  1568. .swapaxes(1, 2)
  1569. .reshape(weights.shape)
  1570. )
  1571. @Model.register("QWenLMHeadModel")
  1572. class QwenModel(Model):
  1573. model_arch = gguf.MODEL_ARCH.QWEN
  1574. @staticmethod
  1575. def token_bytes_to_string(b):
  1576. from transformers.models.gpt2.tokenization_gpt2 import bytes_to_unicode
  1577. byte_encoder = bytes_to_unicode()
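        # Map each raw byte to its printable GPT-2 surrogate character; illustrative example
        # (added, not from upstream): 0x20 (space) maps to 'Ġ'.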
        return ''.join([byte_encoder[ord(char)] for char in b.decode('latin-1')])

    @staticmethod
    def bpe(mergeable_ranks: dict[bytes, int], token: bytes, max_rank: int | None = None) -> list[bytes]:
        parts = [bytes([b]) for b in token]
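        # Greedy BPE: repeatedly merge the adjacent pair with the lowest merge rank. Illustrative
        # example (added, not from upstream): token b"abc" with ranks {b"ab": 0} merges to
        # [b"ab", b"c"], then stops because the pair (b"ab", b"c") has no rank.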
        while True:
            min_idx = None
            min_rank = None
            for i, pair in enumerate(zip(parts[:-1], parts[1:])):
                rank = mergeable_ranks.get(pair[0] + pair[1])
                if rank is not None and (min_rank is None or rank < min_rank):
                    min_idx = i
                    min_rank = rank
            if min_rank is None or (max_rank is not None and min_rank >= max_rank):
                break
            assert min_idx is not None
            parts = parts[:min_idx] + [parts[min_idx] + parts[min_idx + 1]] + parts[min_idx + 2:]
        return parts

    def set_vocab(self):
        self._set_vocab_qwen()

    def set_gguf_parameters(self):
        self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"])
        self.gguf_writer.add_block_count(self.hparams["num_hidden_layers"])
        self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
        self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
        self.gguf_writer.add_rope_freq_base(self.hparams["rotary_emb_base"])
        self.gguf_writer.add_rope_dimension_count(self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
        self.gguf_writer.add_head_count(self.hparams["num_attention_heads"])
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)
  1607. @Model.register("Qwen2ForCausalLM")
  1608. class Qwen2Model(Model):
  1609. model_arch = gguf.MODEL_ARCH.QWEN2
  1610. def set_vocab(self):
  1611. try:
  1612. self._set_vocab_sentencepiece()
  1613. except FileNotFoundError:
  1614. self._set_vocab_gpt2()
  1615. def set_gguf_parameters(self):
  1616. super().set_gguf_parameters()
  1617. if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]:
  1618. if self.hparams["rope_scaling"].get("type") == "yarn":
  1619. self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
  1620. self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"])
  1621. self.gguf_writer.add_rope_scaling_orig_ctx_len(self.hparams["rope_scaling"]["original_max_position_embeddings"])
  1622. @Model.register("Qwen2VLForConditionalGeneration")
  1623. class Qwen2VLModel(Model):
  1624. model_arch = gguf.MODEL_ARCH.QWEN2VL
  1625. def set_gguf_parameters(self):
  1626. super().set_gguf_parameters()
  1627. mrope_section = self.hparams["rope_scaling"]["mrope_section"]
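        # Pad mrope_section with zeros so that four section sizes are always written
        # (comment added for clarity).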
        mrope_section += [0] * max(0, 4 - len(mrope_section))
        self.gguf_writer.add_rope_dimension_sections(mrope_section)

    def set_vocab(self):
        try:
            self._set_vocab_sentencepiece()
        except FileNotFoundError:
            self._set_vocab_gpt2()

    def get_tensors(self) -> Iterator[tuple[str, Tensor]]:
        for name, data in super().get_tensors():
            if name.startswith("visual."):
                continue
            yield name, data
  1640. @Model.register("Qwen2MoeForCausalLM")
  1641. class Qwen2MoeModel(Model):
  1642. model_arch = gguf.MODEL_ARCH.QWEN2MOE
  1643. def set_gguf_parameters(self):
  1644. super().set_gguf_parameters()
  1645. if (n_experts := self.hparams.get("num_experts")) is not None:
  1646. self.gguf_writer.add_expert_count(n_experts)
  1647. if (moe_intermediate_size := self.hparams.get("moe_intermediate_size")) is not None:
  1648. self.gguf_writer.add_expert_feed_forward_length(moe_intermediate_size)
  1649. logger.info(f"gguf: expert feed forward length = {moe_intermediate_size}")
  1650. if (shared_expert_intermediate_size := self.hparams.get('shared_expert_intermediate_size')) is not None:
  1651. self.gguf_writer.add_expert_shared_feed_forward_length(shared_expert_intermediate_size)
  1652. logger.info(f"gguf: expert shared feed forward length = {shared_expert_intermediate_size}")
  1653. _experts: list[dict[str, Tensor]] | None = None
  1654. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  1655. # process the experts separately
  1656. if name.find("experts") != -1:
  1657. n_experts = self.hparams["num_experts"]
  1658. assert bid is not None
  1659. if self._experts is None:
  1660. self._experts = [{} for _ in range(self.block_count)]
  1661. self._experts[bid][name] = data_torch
  1662. if len(self._experts[bid]) >= n_experts * 3:
  1663. tensors: list[tuple[str, Tensor]] = []
  1664. # merge the experts into a single 3d tensor
  1665. for w_name in ["down_proj", "gate_proj", "up_proj"]:
  1666. datas: list[Tensor] = []
  1667. for xid in range(n_experts):
  1668. ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
  1669. datas.append(self._experts[bid][ename])
  1670. del self._experts[bid][ename]
  1671. data_torch = torch.stack(datas, dim=0)
  1672. merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"
  1673. new_name = self.map_tensor_name(merged_name)
  1674. tensors.append((new_name, data_torch))
  1675. return tensors
  1676. else:
  1677. return []
  1678. return [(self.map_tensor_name(name), data_torch)]
  1679. def prepare_tensors(self):
  1680. super().prepare_tensors()
  1681. if self._experts is not None:
  1682. # flatten `list[dict[str, Tensor]]` into `list[str]`
  1683. experts = [k for d in self._experts for k in d.keys()]
  1684. if len(experts) > 0:
  1685. raise ValueError(f"Unprocessed experts: {experts}")
  1686. @Model.register("GPT2LMHeadModel")
  1687. class GPT2Model(Model):
  1688. model_arch = gguf.MODEL_ARCH.GPT2
  1689. def set_gguf_parameters(self):
  1690. self.gguf_writer.add_block_count(self.hparams["n_layer"])
  1691. self.gguf_writer.add_context_length(self.hparams["n_ctx"])
  1692. self.gguf_writer.add_embedding_length(self.hparams["n_embd"])
  1693. self.gguf_writer.add_feed_forward_length(4 * self.hparams["n_embd"])
  1694. self.gguf_writer.add_head_count(self.hparams["n_head"])
  1695. self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
  1696. self.gguf_writer.add_file_type(self.ftype)
  1697. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  1698. del bid # unused
  1699. tensors: list[tuple[str, Tensor]] = []
  1700. # we don't need these
  1701. if name.endswith((".attn.bias", ".attn.masked_bias")):
  1702. return tensors
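
        # HF GPT-2 stores these as Conv1D modules, whose weights are transposed relative to
        # nn.Linear, so transpose them back (comment added for clarity).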
        if name.endswith((".c_attn.weight", ".c_proj.weight", ".c_fc.weight")):
            data_torch = data_torch.transpose(1, 0)

        new_name = self.map_tensor_name(name)

        tensors.append((new_name, data_torch))

        # note: GPT2 output is tied to (same as) wte in original model
        if new_name == self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD):
            tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT), data_torch))

        return tensors
  1711. @Model.register("PhiForCausalLM")
  1712. class Phi2Model(Model):
  1713. model_arch = gguf.MODEL_ARCH.PHI2
  1714. def set_gguf_parameters(self):
  1715. block_count = self.find_hparam(["num_hidden_layers", "n_layer"])
  1716. rot_pct = self.find_hparam(["partial_rotary_factor"])
  1717. n_embd = self.find_hparam(["hidden_size", "n_embd"])
  1718. n_head = self.find_hparam(["num_attention_heads", "n_head"])
  1719. self.gguf_writer.add_context_length(self.find_hparam(["n_positions", "max_position_embeddings"]))
  1720. self.gguf_writer.add_embedding_length(n_embd)
  1721. self.gguf_writer.add_feed_forward_length(4 * n_embd)
  1722. self.gguf_writer.add_block_count(block_count)
  1723. self.gguf_writer.add_head_count(n_head)
  1724. self.gguf_writer.add_head_count_kv(n_head)
  1725. self.gguf_writer.add_layer_norm_eps(self.find_hparam(["layer_norm_epsilon", "layer_norm_eps"]))
  1726. self.gguf_writer.add_rope_dimension_count(int(rot_pct * n_embd) // n_head)
  1727. self.gguf_writer.add_file_type(self.ftype)
  1728. self.gguf_writer.add_add_bos_token(False)
  1729. @Model.register("Phi3ForCausalLM")
  1730. class Phi3MiniModel(Model):
  1731. model_arch = gguf.MODEL_ARCH.PHI3
  1732. def set_vocab(self):
  1733. from sentencepiece import SentencePieceProcessor
  1734. tokenizer_path = self.dir_model / 'tokenizer.model'
  1735. if not tokenizer_path.is_file():
  1736. raise ValueError(f'Error: Missing {tokenizer_path}')
  1737. tokenizer = SentencePieceProcessor()
  1738. tokenizer.LoadFromFile(str(tokenizer_path))
  1739. vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())
  1740. tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
  1741. scores: list[float] = [-10000.0] * vocab_size
  1742. toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size
  1743. for token_id in range(tokenizer.vocab_size()):
  1744. piece = tokenizer.IdToPiece(token_id)
  1745. text = piece.encode("utf-8")
  1746. score = tokenizer.GetScore(token_id)
  1747. toktype = SentencePieceTokenTypes.NORMAL
  1748. if tokenizer.IsUnknown(token_id):
  1749. toktype = SentencePieceTokenTypes.UNKNOWN
  1750. elif tokenizer.IsControl(token_id):
  1751. toktype = SentencePieceTokenTypes.CONTROL
  1752. elif tokenizer.IsUnused(token_id):
  1753. toktype = SentencePieceTokenTypes.UNUSED
  1754. elif tokenizer.IsByte(token_id):
  1755. toktype = SentencePieceTokenTypes.BYTE
  1756. tokens[token_id] = text
  1757. scores[token_id] = score
  1758. toktypes[token_id] = toktype
  1759. added_tokens_file = self.dir_model / 'added_tokens.json'
  1760. if added_tokens_file.is_file():
  1761. with open(added_tokens_file, "r", encoding="utf-8") as f:
  1762. added_tokens_json = json.load(f)
  1763. for key in added_tokens_json:
  1764. token_id = added_tokens_json[key]
  1765. if token_id >= vocab_size:
  1766. logger.debug(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
  1767. continue
  1768. tokens[token_id] = key.encode("utf-8")
  1769. scores[token_id] = -1000.0
  1770. toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
  1771. tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
  1772. if tokenizer_config_file.is_file():
  1773. with open(tokenizer_config_file, "r", encoding="utf-8") as f:
  1774. tokenizer_config_json = json.load(f)
  1775. added_tokens_decoder = tokenizer_config_json.get("added_tokens_decoder", {})
  1776. for token_id, foken_data in added_tokens_decoder.items():
  1777. token_id = int(token_id)
  1778. token = foken_data["content"].encode("utf-8")
  1779. if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
  1780. if tokens[token_id] != token:
  1781. logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token.decode("utf-8")!r}')
  1782. tokens[token_id] = token
  1783. scores[token_id] = -1000.0
  1784. toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
  1785. if foken_data.get("special"):
  1786. toktypes[token_id] = SentencePieceTokenTypes.CONTROL
  1787. tokenizer_file = self.dir_model / 'tokenizer.json'
  1788. if tokenizer_file.is_file():
  1789. with open(tokenizer_file, "r", encoding="utf-8") as f:
  1790. tokenizer_json = json.load(f)
  1791. added_tokens = tokenizer_json.get("added_tokens", [])
  1792. for foken_data in added_tokens:
  1793. token_id = int(foken_data["id"])
  1794. token = foken_data["content"].encode("utf-8")
  1795. if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
  1796. if tokens[token_id] != token:
  1797. logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token.decode("utf-8")!r}')
  1798. tokens[token_id] = token
  1799. scores[token_id] = -1000.0
  1800. toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
  1801. if foken_data.get("special"):
  1802. toktypes[token_id] = SentencePieceTokenTypes.CONTROL
  1803. self.gguf_writer.add_tokenizer_model("llama")
  1804. self.gguf_writer.add_tokenizer_pre("default")
  1805. self.gguf_writer.add_token_list(tokens)
  1806. self.gguf_writer.add_token_scores(scores)
  1807. self.gguf_writer.add_token_types(toktypes)
  1808. special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
  1809. special_vocab.add_to_gguf(self.gguf_writer)
  1810. def set_gguf_parameters(self):
  1811. block_count = self.find_hparam(["num_hidden_layers", "n_layer"])
  1812. n_embd = self.find_hparam(["hidden_size", "n_embd"])
  1813. n_head = self.find_hparam(["num_attention_heads", "n_head"])
  1814. n_head_kv = self.find_hparam(["num_key_value_heads", "n_head_kv"])
  1815. rms_eps = self.find_hparam(["rms_norm_eps"])
  1816. max_pos_embds = self.find_hparam(["n_positions", "max_position_embeddings"])
  1817. orig_max_pos_embds = self.find_hparam(["original_max_position_embeddings"])
  1818. rope_dims = n_embd // n_head
  1819. self.gguf_writer.add_context_length(max_pos_embds)
  1820. self.gguf_writer.add_rope_scaling_orig_ctx_len(orig_max_pos_embds)
  1821. self.gguf_writer.add_embedding_length(n_embd)
  1822. self.gguf_writer.add_feed_forward_length(self.find_hparam(["intermediate_size"]))
  1823. self.gguf_writer.add_block_count(block_count)
  1824. self.gguf_writer.add_head_count(n_head)
  1825. self.gguf_writer.add_head_count_kv(n_head_kv)
  1826. self.gguf_writer.add_layer_norm_rms_eps(rms_eps)
  1827. self.gguf_writer.add_rope_dimension_count(rope_dims)
  1828. self.gguf_writer.add_rope_freq_base(self.find_hparam(["rope_theta"]))
  1829. self.gguf_writer.add_file_type(self.ftype)
  1830. self.gguf_writer.add_sliding_window(self.find_hparam(["sliding_window"]))
  1831. def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
  1832. n_embd = self.find_hparam(["hidden_size", "n_embd"])
  1833. n_head = self.find_hparam(["num_attention_heads", "n_head"])
  1834. max_pos_embds = self.find_hparam(["n_positions", "max_position_embeddings"])
  1835. orig_max_pos_embds = self.find_hparam(["original_max_position_embeddings"])
  1836. rope_dims = n_embd // n_head
  1837. # write rope scaling for long context (128k) model
  1838. rope_scaling = self.find_hparam(['rope_scaling'], True)
  1839. if rope_scaling is None:
  1840. return
  1841. scale = max_pos_embds / orig_max_pos_embds
  1842. rope_scaling_type = rope_scaling.get('type', '').lower()
  1843. if len(rope_scaling_type) == 0:
  1844. raise KeyError('Missing the required key rope_scaling.type')
  1845. if rope_scaling_type == 'su' or rope_scaling_type == 'longrope':
  1846. attn_factor = math.sqrt(1 + math.log(scale) / math.log(orig_max_pos_embds)) if scale > 1.0 else 1.0
  1847. elif rope_scaling_type == 'yarn':
  1848. attn_factor = 0.1 * math.log(scale) + 1.0 if scale > 1.0 else 1.0
  1849. else:
  1850. raise NotImplementedError(f'The rope scaling type {rope_scaling_type} is not supported yet')
  1851. self.gguf_writer.add_rope_scaling_attn_factors(attn_factor)
  1852. long_factors = rope_scaling.get('long_factor', None)
  1853. short_factors = rope_scaling.get('short_factor', None)
  1854. if long_factors is None or short_factors is None:
  1855. raise KeyError('Missing the required key rope_scaling.long_factor or rope_scaling_short_factor')
  1856. if len(long_factors) != len(short_factors) or len(long_factors) != rope_dims / 2:
  1857. raise ValueError(f'The length of rope long and short factors must be {rope_dims / 2}')
  1858. yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FACTORS_LONG), torch.tensor(long_factors, dtype=torch.float32))
  1859. yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FACTORS_SHORT), torch.tensor(short_factors, dtype=torch.float32))
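# Illustrative sketch (not part of the converter): the attention-scaling
# factors computed above, for a hypothetical 4k -> 128k context extension.
# The context sizes are assumed values, not read from any config.
def _example_phi3_attn_factors() -> tuple[float, float]:
    import math
    orig_ctx, ctx = 4096, 131072  # assumed original / extended context
    scale = ctx / orig_ctx        # 32.0
    su = math.sqrt(1 + math.log(scale) / math.log(orig_ctx))  # ~1.19 for 'su'/'longrope'
    yarn = 0.1 * math.log(scale) + 1.0                        # ~1.35 for 'yarn'
    return su, yarn
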
@Model.register("PlamoForCausalLM")
class PlamoModel(Model):
    model_arch = gguf.MODEL_ARCH.PLAMO

    def set_vocab(self):
        self._set_vocab_sentencepiece()

    def set_gguf_parameters(self):
        hparams = self.hparams
        block_count = hparams["num_hidden_layers"]

        self.gguf_writer.add_context_length(4096)  # not in config.json
        self.gguf_writer.add_embedding_length(hparams["hidden_size"])
        self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_head_count(hparams["num_attention_heads"])
        self.gguf_writer.add_head_count_kv(5)  # hparams["num_key_value_heads"] is wrong
        self.gguf_writer.add_layer_norm_rms_eps(hparams["rms_norm_eps"])
        self.gguf_writer.add_file_type(self.ftype)

    def shuffle_attn_q_weight(self, data_torch):
        assert data_torch.size() == (5120, 5120)
        data_torch = data_torch.reshape(8, 5, 128, 5120)
        data_torch = torch.permute(data_torch, (1, 0, 2, 3))
        data_torch = torch.reshape(data_torch, (5120, 5120))
        return data_torch

    def shuffle_attn_output_weight(self, data_torch):
        assert data_torch.size() == (5120, 5120)
        data_torch = data_torch.reshape(5120, 8, 5, 128)
        data_torch = torch.permute(data_torch, (0, 2, 1, 3))
        data_torch = torch.reshape(data_torch, (5120, 5120))
        return data_torch

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        new_name = self.map_tensor_name(name)

        # shuffle for broadcasting of gqa in ggml_mul_mat
        if new_name.endswith("attn_q.weight"):
            data_torch = self.shuffle_attn_q_weight(data_torch)
        elif new_name.endswith("attn_output.weight"):
            data_torch = self.shuffle_attn_output_weight(data_torch)

        return [(new_name, data_torch)]

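# Illustrative sketch (not part of the converter): the q-weight shuffle above
# appears to regroup PLaMo's 40 query heads (8 per each of the 5 KV heads,
# 128 dims per head; 8 * 5 * 128 == 5120) so that heads sharing a KV head
# become contiguous for ggml's GQA broadcast. Shapes-only sketch:
def _example_plamo_shuffle_shapes():
    import torch
    w = torch.zeros((5120, 5120))  # stand-in for attn_q.weight
    w = w.reshape(8, 5, 128, 5120).permute(1, 0, 2, 3).reshape(5120, 5120)
    return w.shape  # torch.Size([5120, 5120]) -- same shape, reordered rows
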
@Model.register("CodeShellForCausalLM")
class CodeShellModel(Model):
    model_arch = gguf.MODEL_ARCH.CODESHELL

    def set_gguf_parameters(self):
        block_count = self.hparams["n_layer"]

        self.gguf_writer.add_context_length(self.hparams["n_positions"])
        self.gguf_writer.add_embedding_length(self.hparams["n_embd"])
        self.gguf_writer.add_feed_forward_length(4 * self.hparams["n_embd"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_head_count(self.hparams["n_head"])
        self.gguf_writer.add_head_count_kv(self.hparams["num_query_groups"])
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)
        self.gguf_writer.add_rope_freq_base(10000.0)
        self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
        self.gguf_writer.add_rope_scaling_factor(1.0)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        new_name = self.map_tensor_name(name)

        tensors: list[tuple[str, Tensor]] = [(new_name, data_torch)]

        if new_name == self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD):
            assert self.tensor_names is not None

            if all(s not in self.tensor_names for s in ("lm_head.weight", "output.weight")):
                # copy tok_embd.weight to output.weight
                tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT), data_torch))

        return tensors

@Model.register("InternLM2ForCausalLM")
class InternLM2Model(Model):
    model_arch = gguf.MODEL_ARCH.INTERNLM2

    def set_vocab(self):
        # (TODO): Is there a better way?
        # Copied from _set_vocab_sentencepiece; the only difference is that we treat
        # the character \x00 specially, converting it into an emoji character so it
        # is not mistakenly recognized as an empty string in C++.
        from sentencepiece import SentencePieceProcessor
        from sentencepiece import sentencepiece_model_pb2 as model

        tokenizer_path = self.dir_model / 'tokenizer.model'

        tokens: list[bytes] = []
        scores: list[float] = []
        toktypes: list[int] = []

        if not tokenizer_path.is_file():
            logger.error(f'Error: Missing {tokenizer_path}')
            sys.exit(1)

        sentencepiece_model = model.ModelProto()  # pyright: ignore[reportAttributeAccessIssue]
        sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read())
        add_prefix = sentencepiece_model.normalizer_spec.add_dummy_prefix

        tokenizer = SentencePieceProcessor()
        tokenizer.LoadFromFile(str(tokenizer_path))

        vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())

        for token_id in range(vocab_size):
            piece = tokenizer.IdToPiece(token_id)
            text = piece.encode("utf-8")
            score = tokenizer.GetScore(token_id)
            if text == b"\x00":
                # (TODO): fixme
                # Hack here and replace the \x00 characters.
                logger.warning(f"InternLM2 converted token '{text}' to '🐉'!")
                text = "🐉".encode("utf-8")

            toktype = SentencePieceTokenTypes.NORMAL
            if tokenizer.IsUnknown(token_id):
                toktype = SentencePieceTokenTypes.UNKNOWN
            elif tokenizer.IsControl(token_id):
                toktype = SentencePieceTokenTypes.CONTROL
            elif tokenizer.IsUnused(token_id):
                toktype = SentencePieceTokenTypes.UNUSED
            elif tokenizer.IsByte(token_id):
                toktype = SentencePieceTokenTypes.BYTE
            # take care of unused raw tokens
            if piece.startswith('[UNUSED'):
                toktype = SentencePieceTokenTypes.UNUSED

            tokens.append(text)
            scores.append(score)
            toktypes.append(toktype)

        added_tokens_file = self.dir_model / 'added_tokens.json'
        if added_tokens_file.is_file():
            with open(added_tokens_file, "r", encoding="utf-8") as f:
                added_tokens_json = json.load(f)

                for key in added_tokens_json:
                    tokens.append(key.encode("utf-8"))
                    scores.append(-1000.0)
                    toktypes.append(SentencePieceTokenTypes.USER_DEFINED)

        chat_eos_token = '<|im_end|>'
        chat_eos_token_id = None

        tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
        if tokenizer_config_file.is_file():
            with open(tokenizer_config_file, "r", encoding="utf-8") as f:
                tokenizer_config_json = json.load(f)
                added_tokens_decoder = tokenizer_config_json.get("added_tokens_decoder", {})
                for token_id, token_data in added_tokens_decoder.items():
                    token_id = int(token_id)
                    token = token_data["content"]
                    if token == chat_eos_token:
                        chat_eos_token_id = token_id
                    token = token.encode("utf-8")
                    if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
                        if tokens[token_id] != token:
                            logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token.decode("utf-8")!r}')
                    tokens[token_id] = token
                    scores[token_id] = -1000.0
                    toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
                    if token_data.get("special"):
                        toktypes[token_id] = SentencePieceTokenTypes.CONTROL

        tokenizer_file = self.dir_model / 'tokenizer.json'
        if tokenizer_file.is_file():
            with open(tokenizer_file, "r", encoding="utf-8") as f:
                tokenizer_json = json.load(f)
                added_tokens = tokenizer_json.get("added_tokens", [])
                for token_data in added_tokens:
                    token_id = int(token_data["id"])
                    token = token_data["content"]
                    if token == chat_eos_token:
                        chat_eos_token_id = token_id
                    token = token.encode("utf-8")
                    if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
                        if tokens[token_id] != token:
                            logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token.decode("utf-8")!r}')
                    tokens[token_id] = token
                    scores[token_id] = -1000.0
                    toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
                    if token_data.get("special"):
                        toktypes[token_id] = SentencePieceTokenTypes.CONTROL

        self.gguf_writer.add_tokenizer_model("llama")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)
        self.gguf_writer.add_add_space_prefix(add_prefix)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        old_eos = special_vocab.special_token_ids["eos"]
        if chat_eos_token_id is not None:
            # For the chat model, we replace the eos with '<|im_end|>'.
            # TODO: this is a hack, should be fixed
            # https://github.com/ggerganov/llama.cpp/pull/6745#issuecomment-2067687048
            special_vocab.special_token_ids["eos"] = chat_eos_token_id
            logger.warning(f"Replacing eos token {old_eos} with special token {chat_eos_token_id}"
                           " in chat mode so that the conversation can end normally.")

        special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"])
        self.gguf_writer.add_block_count(self.hparams["num_hidden_layers"])
        self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
        self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
        self.gguf_writer.add_rope_freq_base(self.hparams["rope_theta"])
        self.gguf_writer.add_head_count(self.hparams["num_attention_heads"])
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
        self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"])
        self.gguf_writer.add_file_type(self.ftype)
        if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]:
            if self.hparams["rope_scaling"].get("type") == "linear":
                self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
                self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        num_heads = self.hparams["num_attention_heads"]
        num_kv_heads = self.hparams["num_key_value_heads"]
        n_embd = self.hparams["hidden_size"]
        q_per_kv = num_heads // num_kv_heads
        head_dim = n_embd // num_heads
        num_groups = num_heads // q_per_kv

        if bid is not None and f"model.layers.{bid}.attention.wqkv" in name:
            qkv = data_torch

            qkv = qkv.reshape((num_groups, q_per_kv + 2, head_dim, n_embd))
            q, k, v = qkv[:, : q_per_kv], qkv[:, -2], qkv[:, -1]

            # The model weights of q and k require additional reshape.
            q = LlamaModel.permute(q.reshape((-1, q.shape[-1])), num_heads, num_heads)
            k = LlamaModel.permute(k.reshape((-1, k.shape[-1])), num_heads, num_kv_heads)
            v = v.reshape((-1, v.shape[-1]))

            return [
                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid), q),
                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid), k),
                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid), v),
            ]
        else:
            return [(self.map_tensor_name(name), data_torch)]

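# Illustrative sketch (not part of the converter): how the fused wqkv tensor
# splits into q/k/v above. The sizes are made-up toy values, chosen only so
# the shapes are easy to follow.
def _example_internlm2_qkv_split():
    import torch
    num_heads, num_kv_heads, n_embd = 8, 2, 64        # assumed toy sizes
    q_per_kv = num_heads // num_kv_heads              # 4 query heads per kv head
    head_dim = n_embd // num_heads                    # 8
    num_groups = num_heads // q_per_kv                # 2
    qkv = torch.zeros((num_groups * (q_per_kv + 2) * head_dim, n_embd))
    qkv = qkv.reshape((num_groups, q_per_kv + 2, head_dim, n_embd))
    q, k, v = qkv[:, :q_per_kv], qkv[:, -2], qkv[:, -1]
    return q.shape, k.shape, v.shape
    # ((2, 4, 8, 64), (2, 8, 64), (2, 8, 64))
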
@Model.register("BertModel", "CamembertModel", "RobertaModel")
class BertModel(Model):
    model_arch = gguf.MODEL_ARCH.BERT

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.vocab_size = None

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_causal_attention(False)

        # get pooling path
        pooling_path = None
        module_path = self.dir_model / "modules.json"
        if module_path.is_file():
            with open(module_path, encoding="utf-8") as f:
                modules = json.load(f)
            for mod in modules:
                if mod["type"] == "sentence_transformers.models.Pooling":
                    pooling_path = mod["path"]
                    break

        # get pooling type
        if pooling_path is not None:
            with open(self.dir_model / pooling_path / "config.json", encoding="utf-8") as f:
                pooling = json.load(f)
            if pooling["pooling_mode_mean_tokens"]:
                pooling_type = gguf.PoolingType.MEAN
            elif pooling["pooling_mode_cls_token"]:
                pooling_type = gguf.PoolingType.CLS
            else:
                raise NotImplementedError("Only MEAN and CLS pooling types supported")
            self.gguf_writer.add_pooling_type(pooling_type)

    def set_vocab(self):
        tokens, toktypes, tokpre = self.get_vocab_base()
        self.vocab_size = len(tokens)

        # we need this to validate the size of the token_type embeddings,
        # though currently we are passing all zeros to the token_type embeddings
        # (which distinguish "Sequence A" from "Sequence B")
        self.gguf_writer.add_token_type_count(self.hparams.get("type_vocab_size", 1))

        # convert to phantom space vocab
        def phantom(tok):
            if tok.startswith("[") and tok.endswith("]"):
                return tok
            if tok.startswith("##"):
                return tok[2:]
            return "\u2581" + tok
        tokens = list(map(phantom, tokens))

        # add vocab to gguf
        self.gguf_writer.add_tokenizer_model("bert")
        self.gguf_writer.add_tokenizer_pre(tokpre)
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)

        # handle special tokens
        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        # we are only using BERT for embeddings so we don't need the pooling layer
        if name in ("embeddings.position_ids", "pooler.dense.weight", "pooler.dense.bias"):
            return []  # we don't need these

        return [(self.map_tensor_name(name), data_torch)]

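# Illustrative sketch (not part of the converter): the WordPiece -> "phantom
# space" mapping performed in set_vocab above, applied to a few made-up tokens.
def _example_bert_phantom():
    def phantom(tok: str) -> str:
        if tok.startswith("[") and tok.endswith("]"):
            return tok          # special tokens like [CLS] pass through
        if tok.startswith("##"):
            return tok[2:]      # continuation pieces lose the marker
        return "\u2581" + tok   # word-initial pieces gain a phantom space
    return [phantom(t) for t in ["[CLS]", "hello", "##ing"]]
    # ['[CLS]', '▁hello', 'ing']
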
@Model.register("NomicBertModel")
class NomicBertModel(BertModel):
    model_arch = gguf.MODEL_ARCH.NOMIC_BERT

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # the HF config claims n_ctx=8192, but it uses RoPE scaling
        self.hparams["n_ctx"] = 2048

        # SwiGLU activation
        assert self.hparams["activation_function"] == "swiglu"
        # this doesn't do anything in the HF version
        assert self.hparams["causal"] is False
        # no bias tensors
        assert self.hparams["qkv_proj_bias"] is False
        assert self.hparams["mlp_fc1_bias"] is False
        assert self.hparams["mlp_fc2_bias"] is False
        # norm at end of layer
        assert self.hparams["prenorm"] is False
        # standard RoPE
        assert self.hparams["rotary_emb_fraction"] == 1.0
        assert self.hparams["rotary_emb_interleaved"] is False
        assert self.hparams["rotary_emb_scale_base"] is None

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_rope_freq_base(self.hparams["rotary_emb_base"])

@Model.register("XLMRobertaModel", "XLMRobertaForSequenceClassification")
class XLMRobertaModel(BertModel):
    model_arch = gguf.MODEL_ARCH.BERT

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # we need the pad_token_id to know how to chop down position_embd matrix
        if (pad_token_id := self.hparams.get("pad_token_id")) is not None:
            self._position_offset = 1 + pad_token_id
            if "max_position_embeddings" in self.hparams:
                self.hparams["max_position_embeddings"] -= self._position_offset
        else:
            self._position_offset = None

    def set_vocab(self):
        # to avoid TypeError: Descriptors cannot be created directly
        # exception when importing sentencepiece_model_pb2
        os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
        from sentencepiece import SentencePieceProcessor
        from sentencepiece import sentencepiece_model_pb2 as model

        tokenizer_path = self.dir_model / 'sentencepiece.bpe.model'
        if not tokenizer_path.is_file():
            raise FileNotFoundError(f"File not found: {tokenizer_path}")

        sentencepiece_model = model.ModelProto()  # pyright: ignore[reportAttributeAccessIssue]
        sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read())
        assert sentencepiece_model.trainer_spec.model_type == 1  # UNIGRAM

        add_prefix = sentencepiece_model.normalizer_spec.add_dummy_prefix
        remove_whitespaces = sentencepiece_model.normalizer_spec.remove_extra_whitespaces
        precompiled_charsmap = sentencepiece_model.normalizer_spec.precompiled_charsmap

        tokenizer = SentencePieceProcessor()
        tokenizer.LoadFromFile(str(tokenizer_path))

        vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())

        tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
        scores: list[float] = [-10000.0] * vocab_size
        toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size

        for token_id in range(tokenizer.vocab_size()):
            piece = tokenizer.IdToPiece(token_id)
            text = piece.encode("utf-8")
            score = tokenizer.GetScore(token_id)

            toktype = SentencePieceTokenTypes.NORMAL
            if tokenizer.IsUnknown(token_id):
                toktype = SentencePieceTokenTypes.UNKNOWN
            elif tokenizer.IsControl(token_id):
                toktype = SentencePieceTokenTypes.CONTROL
            elif tokenizer.IsUnused(token_id):
                toktype = SentencePieceTokenTypes.UNUSED
            elif tokenizer.IsByte(token_id):
                toktype = SentencePieceTokenTypes.BYTE

            tokens[token_id] = text
            scores[token_id] = score
            toktypes[token_id] = toktype

        if vocab_size > len(tokens):
            pad_count = vocab_size - len(tokens)
            logger.debug(f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]")
            for i in range(1, pad_count + 1):
                tokens.append(bytes(f"[PAD{i}]", encoding="utf-8"))
                scores.append(-1000.0)
                toktypes.append(SentencePieceTokenTypes.UNUSED)

        # realign tokens (see HF tokenizer code)
        tokens = [b'<s>', b'<pad>', b'</s>', b'<unk>'] + tokens[3:-1]
        scores = [0.0, 0.0, 0.0, 0.0] + scores[3:-1]
        toktypes = [
            SentencePieceTokenTypes.CONTROL,
            SentencePieceTokenTypes.CONTROL,
            SentencePieceTokenTypes.CONTROL,
            SentencePieceTokenTypes.UNKNOWN,
        ] + toktypes[3:-1]

        self.gguf_writer.add_tokenizer_model("t5")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)
        self.gguf_writer.add_add_space_prefix(add_prefix)
        self.gguf_writer.add_token_type_count(self.hparams.get("type_vocab_size", 1))
        self.gguf_writer.add_remove_extra_whitespaces(remove_whitespaces)
        if precompiled_charsmap:
            self.gguf_writer.add_precompiled_charsmap(precompiled_charsmap)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

        self.gguf_writer.add_add_bos_token(True)
        self.gguf_writer.add_add_eos_token(True)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # if name starts with "roberta.", remove the prefix
        # e.g. https://huggingface.co/BAAI/bge-reranker-v2-m3/tree/main
        if name.startswith("roberta."):
            name = name[8:]

        # position embeddings start at pad_token_id + 1, so just chop down the weight tensor
        if name == "embeddings.position_embeddings.weight":
            if self._position_offset is not None:
                data_torch = data_torch[self._position_offset:, :]

        return super().modify_tensors(data_torch, name, bid)

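# Illustrative sketch (not part of the converter): the XLM-R position-embedding
# chop handled above. With an assumed pad_token_id of 1, rows 0 and 1 of the
# position matrix are reserved, so real positions start at row 2.
def _example_xlmr_position_offset():
    import torch
    pad_token_id = 1                            # assumed
    position_offset = 1 + pad_token_id          # 2
    pos_embd = torch.zeros((514, 768))          # assumed HF base-model shape
    return pos_embd[position_offset:, :].shape  # torch.Size([512, 768])
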
@Model.register("GemmaForCausalLM")
class GemmaModel(Model):
    model_arch = gguf.MODEL_ARCH.GEMMA

    def set_vocab(self):
        self._set_vocab_sentencepiece()

        # TODO: these special tokens should be exported only for the CodeGemma family
        special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=False,
                                          special_token_types=['prefix', 'suffix', 'middle', 'fsep', 'eot'])
        special_vocab._set_special_token("prefix", 67)
        special_vocab._set_special_token("suffix", 69)
        special_vocab._set_special_token("middle", 68)
        special_vocab._set_special_token("fsep", 70)
        special_vocab._set_special_token("eot", 107)
        special_vocab.chat_template = None  # do not add it twice
        special_vocab.add_to_gguf(self.gguf_writer)

        self.gguf_writer.add_add_space_prefix(False)

    def set_gguf_parameters(self):
        hparams = self.hparams
        block_count = hparams["num_hidden_layers"]

        self.gguf_writer.add_context_length(hparams["max_position_embeddings"])
        self.gguf_writer.add_embedding_length(hparams["hidden_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
        self.gguf_writer.add_head_count(hparams["num_attention_heads"])
        self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"] if "num_key_value_heads" in hparams else hparams["num_attention_heads"])
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
        self.gguf_writer.add_key_length(hparams["head_dim"])
        self.gguf_writer.add_value_length(hparams["head_dim"])
        self.gguf_writer.add_file_type(self.ftype)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        # lm_head is not used in llama.cpp, while autoawq will include this tensor in model
        # To prevent errors, skip loading lm_head.weight.
        if name == "lm_head.weight":
            logger.debug(f"Skipping get tensor {name!r} in safetensors so that convert can end normally.")
            return []

        # ref: https://github.com/huggingface/transformers/blob/fc37f38915372c15992b540dfcbbe00a916d4fc6/src/transformers/models/gemma/modeling_gemma.py#L89
        if name.endswith("norm.weight"):
            data_torch = data_torch + 1

        return [(self.map_tensor_name(name), data_torch)]

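# Illustrative sketch (not part of the converter): Gemma checkpoints store
# RMSNorm weights zero-centered, i.e. the effective scale is (1 + w), which is
# why the conversion above adds 1 back. The tensor values here are made up.
def _example_gemma_norm_shift():
    import torch
    w = torch.tensor([-0.1, 0.0, 0.2])  # assumed stored norm weights
    return w + 1                        # effective scales: [0.9, 1.0, 1.2]
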
@Model.register("Gemma2ForCausalLM")
class Gemma2Model(Model):
    model_arch = gguf.MODEL_ARCH.GEMMA2

    def set_vocab(self):
        self._set_vocab_sentencepiece()

        self.gguf_writer.add_add_space_prefix(False)

    def set_gguf_parameters(self):
        hparams = self.hparams
        block_count = hparams["num_hidden_layers"]

        self.gguf_writer.add_context_length(hparams["max_position_embeddings"])
        self.gguf_writer.add_embedding_length(hparams["hidden_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
        self.gguf_writer.add_head_count(hparams["num_attention_heads"])
        self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"] if "num_key_value_heads" in hparams else hparams["num_attention_heads"])
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
        self.gguf_writer.add_key_length(hparams["head_dim"])
        self.gguf_writer.add_value_length(hparams["head_dim"])
        self.gguf_writer.add_file_type(self.ftype)
        self.gguf_writer.add_attn_logit_softcapping(self.hparams["attn_logit_softcapping"])
        self.gguf_writer.add_final_logit_softcapping(self.hparams["final_logit_softcapping"])
        self.gguf_writer.add_sliding_window(self.hparams["sliding_window"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        # lm_head is not used in llama.cpp, while autoawq will include this tensor in model
        # To prevent errors, skip loading lm_head.weight.
        if name == "lm_head.weight":
            logger.debug(f"Skipping get tensor {name!r} in safetensors so that convert can end normally.")
            return []

        # ref: https://github.com/huggingface/transformers/blob/fc37f38915372c15992b540dfcbbe00a916d4fc6/src/transformers/models/gemma/modeling_gemma.py#L89
        if name.endswith("norm.weight"):
            data_torch = data_torch + 1

        return [(self.map_tensor_name(name), data_torch)]

@Model.register("Starcoder2ForCausalLM")
class StarCoder2Model(Model):
    model_arch = gguf.MODEL_ARCH.STARCODER2

@Model.register("Rwkv6ForCausalLM")
class Rwkv6Model(Model):
    model_arch = gguf.MODEL_ARCH.RWKV6

    def set_vocab(self):
        assert (self.dir_model / "rwkv_vocab_v20230424.txt").is_file()
        vocab_size = self.hparams.get("vocab_size", 65536)

        tokens: list[bytes] = ['<s>'.encode("utf-8")]
        toktypes: list[int] = [gguf.TokenType.CONTROL]

        with open(self.dir_model / "rwkv_vocab_v20230424.txt", "r", encoding="utf-8") as f:
            lines = f.readlines()
            for line in lines:
                parts = line.split(' ')
                assert len(parts) >= 3
                token, token_len = ast.literal_eval(' '.join(parts[1:-1])), int(parts[-1])
                token = token.encode("utf-8") if isinstance(token, str) else token
                assert isinstance(token, bytes)
                assert len(token) == token_len
                token_text: str = repr(token)[2:-1]  # "b'\xff'" -> "\xff"
                tokens.append(token_text.encode("utf-8"))
                toktypes.append(gguf.TokenType.NORMAL)
        remainder = vocab_size - len(tokens)
        assert remainder >= 0
        for i in range(len(tokens), vocab_size):
            tokens.append(f"[PAD{i}]".encode("utf-8"))
            toktypes.append(gguf.TokenType.UNUSED)

        self.gguf_writer.add_tokenizer_model("rwkv")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)
        special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=False)
        special_vocab.chat_template = "rwkv-world"
        # hack: Add '\n\n' as the EOT token to make it chat normally
        special_vocab._set_special_token("eot", 261)
        special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        block_count = self.hparams["num_hidden_layers"]
        head_size = self.hparams["head_size"]
        hidden_size = self.hparams["hidden_size"]
        layer_norm_eps = self.hparams["layer_norm_epsilon"]
        rescale_every_n_layers = self.hparams["rescale_every"]
        intermediate_size = self.hparams["intermediate_size"] if self.hparams["intermediate_size"] is not None else int((hidden_size * 3.5) // 32 * 32)
        time_mix_extra_dim = 64 if hidden_size == 4096 else 32
        time_decay_extra_dim = 128 if hidden_size == 4096 else 64

        # RWKV isn't context limited
        self.gguf_writer.add_context_length(1048576)
        self.gguf_writer.add_embedding_length(hidden_size)
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_layer_norm_eps(layer_norm_eps)
        self.gguf_writer.add_rescale_every_n_layers(rescale_every_n_layers)
        self.gguf_writer.add_wkv_head_size(head_size)
        self.gguf_writer.add_time_mix_extra_dim(time_mix_extra_dim)
        self.gguf_writer.add_time_decay_extra_dim(time_decay_extra_dim)
        self.gguf_writer.add_feed_forward_length(intermediate_size)
        self.gguf_writer.add_file_type(self.ftype)

        # required by llama.cpp, unused
        self.gguf_writer.add_head_count(0)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        new_name = self.map_tensor_name(name)

        if not (new_name.endswith(".weight") or new_name.endswith(".bias")):
            new_name += ".weight"

        if new_name.endswith("time_mix_w1.weight") or new_name.endswith("time_mix_decay_w1.weight") or new_name.endswith("time_mix_decay_w2.weight"):
            data_torch = data_torch.transpose(0, 1)

        if new_name.endswith("time_mix_w2.weight"):
            data_torch = data_torch.permute(0, 2, 1)

        rescale_every_n_layers = self.hparams["rescale_every"]
        if rescale_every_n_layers > 0:
            if new_name.endswith("time_mix_output.weight") or new_name.endswith("channel_mix_value.weight"):
                data_torch = data_torch.div_(2 ** int(bid // rescale_every_n_layers))

        yield (new_name, data_torch)

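# Illustrative sketch (not part of the converter): parsing one line of
# rwkv_vocab_v20230424.txt as done in set_vocab above. The sample line is made
# up, but follows the "<id> <repr> <byte-length>" layout the parser expects.
def _example_rwkv_vocab_line():
    import ast
    line = "261 '\\n\\n' 2"  # assumed sample entry
    parts = line.split(' ')
    token = ast.literal_eval(' '.join(parts[1:-1]))
    token = token.encode("utf-8") if isinstance(token, str) else token
    assert len(token) == int(parts[-1])
    return token  # b'\n\n'
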
@Model.register("MambaForCausalLM", "MambaLMHeadModel", "FalconMambaForCausalLM")
class MambaModel(Model):
    model_arch = gguf.MODEL_ARCH.MAMBA

    def set_vocab(self):
        vocab_size = self.hparams["vocab_size"]
        # Round vocab size up to the next multiple of pad_vocab (default 8)
        pad_vocab = self.hparams.get("pad_vocab_size_multiple", 8)
        # pad using ceiling division
        # ref: https://stackoverflow.com/a/17511341/22827863
        vocab_size = -(vocab_size // -pad_vocab) * pad_vocab
        self.hparams["vocab_size"] = vocab_size

        if (self.dir_model / "tokenizer.json").is_file():
            self._set_vocab_gpt2()
        elif (self.dir_model / "tokenizer.model").is_file():
            self._set_vocab_sentencepiece()
        else:
            # Use the GPT-NeoX tokenizer when no tokenizer files are present
            self._set_vocab_builtin("gpt-neox", vocab_size)

    def set_gguf_parameters(self):
        d_model = self.find_hparam(["hidden_size", "d_model"])
        d_conv = self.find_hparam(["conv_kernel", "d_conv"], optional=True) or 4
        d_inner = self.find_hparam(["intermediate_size", "d_inner"], optional=True) or 2 * d_model
        d_state = self.find_hparam(["state_size", "d_state"], optional=True) or 16
        # ceiling division
        # ref: https://stackoverflow.com/a/17511341/22827863
        # ref: https://github.com/state-spaces/mamba/blob/ce59daea3a090d011d6476c6e5b97f6d58ddad8b/mamba_ssm/modules/mamba_simple.py#L58
        dt_rank = self.find_hparam(["time_step_rank", "dt_rank"], optional=True) or -(d_model // -16)
        rms_norm_eps = self.find_hparam(["layer_norm_epsilon", "rms_norm_eps"], optional=True) or 1e-5
        use_dt_b_c_norm = False
        # For falconmamba we do apply RMS norm on B / DT and C layers
        if self.find_hparam(["model_type"], optional=True) in ("falcon_mamba",):
            use_dt_b_c_norm = True
        # Fail early for models which don't have a block expansion factor of 2
        assert d_inner == 2 * d_model

        self.gguf_writer.add_context_length(2**20)  # arbitrary value; for those who use the default
        self.gguf_writer.add_embedding_length(d_model)
        self.gguf_writer.add_feed_forward_length(0)  # unused, but seemingly required when loading
        self.gguf_writer.add_head_count(0)  # unused, but seemingly required when loading
        self.gguf_writer.add_block_count(self.block_count)
        self.gguf_writer.add_ssm_conv_kernel(d_conv)
        self.gguf_writer.add_ssm_inner_size(d_inner)
        self.gguf_writer.add_ssm_state_size(d_state)
        self.gguf_writer.add_ssm_time_step_rank(dt_rank)
        self.gguf_writer.add_layer_norm_rms_eps(rms_norm_eps)
        self.gguf_writer.add_ssm_dt_b_c_rms(use_dt_b_c_norm)  # For classic Mamba we don't apply rms norm on B / DT layers
        self.gguf_writer.add_file_type(self.ftype)

    _tok_embd = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        output_name = self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT)
        tok_embd_name = self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD)

        new_name = self.map_tensor_name(name)

        if name.endswith(".A_log"):
            logger.debug("A_log --> A ==> " + new_name)
            data_torch = -torch.exp(data_torch)

        # assuming token_embd.weight is seen before output.weight
        if self._tok_embd is not None and new_name == output_name:
            if torch.equal(self._tok_embd, data_torch):
                logger.debug(f"{output_name} is equivalent to {tok_embd_name}, omitting")
                return []
        elif new_name == tok_embd_name:
            self._tok_embd = data_torch

        return [(new_name, data_torch)]

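# Illustrative sketch (not part of the converter): the negative-floor-division
# ceiling trick used above for both vocab padding and the dt_rank default.
# The numeric values are assumed examples.
def _example_ceil_div():
    vocab_size, pad_to = 50277, 8            # assumed GPT-NeoX-like vocab
    padded = -(vocab_size // -pad_to) * pad_to  # ceil(50277 / 8) * 8 == 50280
    d_model = 2560                           # assumed
    dt_rank = -(d_model // -16)              # ceil(2560 / 16) == 160
    return padded, dt_rank                   # (50280, 160)
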
@Model.register("CohereForCausalLM")
class CommandR2Model(Model):
    model_arch = gguf.MODEL_ARCH.COMMAND_R

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # max_position_embeddings = 8192 in config.json but model was actually
        # trained on 128k context length
        # aya-23 models don't have model_max_length specified
        self.hparams["max_position_embeddings"] = self.find_hparam(["model_max_length", "max_position_embeddings"])

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_logit_scale(self.hparams["logit_scale"])
        self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)

@Model.register("OlmoForCausalLM")
@Model.register("OLMoForCausalLM")
class OlmoModel(Model):
    model_arch = gguf.MODEL_ARCH.OLMO

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_layer_norm_eps(1e-5)
        clip_qkv = self.hparams.get("clip_qkv")
        if clip_qkv is not None:
            self.gguf_writer.add_clamp_kqv(clip_qkv)

    # Same as super class, but permuting q_proj, k_proj
    # Copied from: LlamaModel
    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        n_head = self.hparams["num_attention_heads"]
        n_kv_head = self.hparams.get("num_key_value_heads")

        if name.endswith("q_proj.weight"):
            data_torch = LlamaModel.permute(data_torch, n_head, n_head)
        if name.endswith("k_proj.weight"):
            data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)

        return [(self.map_tensor_name(name), data_torch)]

@Model.register("Olmo2ForCausalLM")
class Olmo2Model(Model):
    model_arch = gguf.MODEL_ARCH.OLMO2

@Model.register("OlmoeForCausalLM")
class OlmoeModel(Model):
    model_arch = gguf.MODEL_ARCH.OLMOE

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_layer_norm_rms_eps(1e-5)
        if (n_experts := self.hparams.get("num_experts")) is not None:
            self.gguf_writer.add_expert_count(n_experts)

    _experts: list[dict[str, Tensor]] | None = None

    # Copied from: Qwen2MoeModel
    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # process the experts separately
        if name.find("experts") != -1:
            n_experts = self.hparams["num_experts"]
            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for w_name in ["down_proj", "gate_proj", "up_proj"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)

                    merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"

                    new_name = self.map_tensor_name(merged_name)

                    tensors.append((new_name, data_torch))

                return tensors
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]

    # Copied from: Qwen2MoeModel
    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")

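# Illustrative sketch (not part of the converter): the expert merge above
# stacks n_experts 2-D per-expert weights into one 3-D tensor per projection.
# The sizes are made-up toy values.
def _example_expert_stack():
    import torch
    n_experts, ffn, hidden = 4, 8, 16  # assumed toy sizes
    datas = [torch.zeros((ffn, hidden)) for _ in range(n_experts)]
    merged = torch.stack(datas, dim=0)
    return merged.shape  # torch.Size([4, 8, 16])
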
@Model.register("JinaBertModel", "JinaBertForMaskedLM")
class JinaBertV2Model(BertModel):
    model_arch = gguf.MODEL_ARCH.JINA_BERT_V2

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.intermediate_size = self.hparams["intermediate_size"]

    def get_tensors(self):
        for name, data in super().get_tensors():
            if 'gated_layer' in name:
                d1 = data[:self.intermediate_size, :]
                name1 = name.replace('gated_layers', 'gated_layers_w')
                name1 = name1.replace('up_gated_layer', 'gated_layers_v')
                d2 = data[self.intermediate_size:, :]
                name2 = name.replace('gated_layers', 'gated_layers_v')
                name2 = name2.replace('up_gated_layer', 'gated_layers_w')
                yield name1, d1
                yield name2, d2
                continue

            yield name, data

    def set_vocab(self):
        tokenizer_class = 'BertTokenizer'
        with open(self.dir_model / "tokenizer_config.json", "r", encoding="utf-8") as f:
            tokenizer_class = json.load(f)['tokenizer_class']

        if tokenizer_class == 'BertTokenizer':
            super().set_vocab()
        elif tokenizer_class == 'RobertaTokenizer':
            self._set_vocab_gpt2()
            self.gguf_writer.add_token_type_count(2)
        else:
            raise NotImplementedError(f'Tokenizer {tokenizer_class} is not supported for JinaBertModel')
        self.gguf_writer.add_add_bos_token(True)
        self.gguf_writer.add_add_eos_token(True)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # if name starts with "bert.", remove the prefix
        # e.g. https://huggingface.co/jinaai/jina-reranker-v1-tiny-en
        if name.startswith("bert."):
            name = name[5:]

        return super().modify_tensors(data_torch, name, bid)

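# Illustrative sketch (not part of the converter): splitting a fused gated-MLP
# weight into two halves as done in get_tensors above. For tensors named
# 'gated_layers', the first half maps to gated_layers_w and the second to
# gated_layers_v; 'up_gated_layer' tensors get the reverse mapping.
# Toy sizes, assumed for clarity.
def _example_jina_gated_split():
    import torch
    intermediate_size, hidden = 8, 4  # assumed toy sizes
    fused = torch.zeros((2 * intermediate_size, hidden))
    d1 = fused[:intermediate_size, :]  # first half
    d2 = fused[intermediate_size:, :]  # second half
    return d1.shape, d2.shape          # (torch.Size([8, 4]), torch.Size([8, 4]))
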
@Model.register("OpenELMForCausalLM")
class OpenELMModel(Model):
    model_arch = gguf.MODEL_ARCH.OPENELM

    @staticmethod
    def _make_divisible(v: float | int, divisor: int) -> int:
        # ref: https://huggingface.co/apple/OpenELM-270M-Instruct/blob/eb111ff2e6724348e5b905984063d4064d4bc579/configuration_openelm.py#L34-L38
        new_v = max(divisor, int(v + divisor / 2) // divisor * divisor)
        # Make sure rounding down does not reduce the value by more than 10%.
        if new_v < 0.9 * v:
            new_v += divisor
        return new_v

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        ffn_multipliers: list[float] = self.hparams["ffn_multipliers"]
        ffn_dim_divisor: int = self.hparams["ffn_dim_divisor"]
        self._n_embd: int = self.hparams["model_dim"]
        self._num_kv_heads: list[int] = self.hparams["num_kv_heads"]
        self._num_query_heads: list[int] = self.hparams["num_query_heads"]
        self._ffn_dims: list[int] = [
            OpenELMModel._make_divisible(multiplier * self._n_embd, ffn_dim_divisor)
            for multiplier in ffn_multipliers
        ]
        assert isinstance(self._num_kv_heads, list) and isinstance(self._num_kv_heads[0], int)
        assert isinstance(self._num_query_heads, list) and isinstance(self._num_query_heads[0], int)

    # Uses the tokenizer from meta-llama/Llama-2-7b-hf
    def set_vocab(self):
        try:
            self._set_vocab_sentencepiece()
        except FileNotFoundError:
            self._set_vocab_builtin("llama-spm", self.hparams["vocab_size"])

    def set_gguf_parameters(self):
        n_embd = self._n_embd
        head_dim = self.hparams["head_dim"]
        rot_pct = 1.0
        assert self.block_count == len(self._num_kv_heads)
        assert self.block_count == len(self._num_query_heads)
        assert self.block_count == len(self._ffn_dims)

        self.gguf_writer.add_block_count(self.block_count)
        self.gguf_writer.add_context_length(self.hparams["max_context_length"])
        self.gguf_writer.add_embedding_length(n_embd)
        self.gguf_writer.add_feed_forward_length(self._ffn_dims)
        self.gguf_writer.add_head_count(self._num_query_heads)
        self.gguf_writer.add_head_count_kv(self._num_kv_heads)
        self.gguf_writer.add_rope_freq_base(self.hparams["rope_freq_constant"])
        # https://huggingface.co/apple/OpenELM-270M-Instruct/blob/c401df2/modeling_openelm.py#L30
        self.gguf_writer.add_layer_norm_rms_eps(1e-6)
        self.gguf_writer.add_rope_dimension_count(int(rot_pct * head_dim))
        self.gguf_writer.add_key_length(head_dim)
        self.gguf_writer.add_value_length(head_dim)
        self.gguf_writer.add_file_type(self.ftype)

    def find_hparam(self, keys: Iterable[str], optional: bool = False) -> Any:
        if "n_layers" in keys:
            return self.hparams["num_transformer_layers"]

        return super().find_hparam(keys, optional)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # split ff
        if bid is not None and name == f"transformer.layers.{bid}.ffn.proj_1.weight":
            ff_dim = self._ffn_dims[bid]
            yield (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE, bid), data_torch[:ff_dim])
            yield (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP, bid), data_torch[ff_dim:])
            return

        yield (self.map_tensor_name(name), data_torch)

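# Illustrative sketch (not part of the converter): OpenELM's _make_divisible
# rounding, with assumed values. 0.5 * 1280 = 640 sits exactly halfway between
# 512 and 768, and ties round up to the next multiple of 256.
def _example_openelm_ffn_dim() -> int:
    def make_divisible(v, divisor):
        new_v = max(divisor, int(v + divisor / 2) // divisor * divisor)
        if new_v < 0.9 * v:  # never round down by more than 10%
            new_v += divisor
        return new_v
    model_dim, multiplier, divisor = 1280, 0.5, 256  # assumed values
    return make_divisible(multiplier * model_dim, divisor)  # 768
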
@Model.register("ArcticForCausalLM")
class ArcticModel(Model):
    model_arch = gguf.MODEL_ARCH.ARCTIC

    def set_vocab(self):
        # The reason for using a custom implementation here is that the
        # snowflake-arctic-instruct model redefined tokens 31998 and 31999 from
        # tokenizer.model and used them as BOS and EOS instead of adding new tokens.
        from sentencepiece import SentencePieceProcessor

        tokenizer_path = self.dir_model / 'tokenizer.model'

        if not tokenizer_path.is_file():
            logger.error(f'Error: Missing {tokenizer_path}')
            sys.exit(1)

        # Read the whole vocabulary from the tokenizer.model file
        tokenizer = SentencePieceProcessor()
        tokenizer.LoadFromFile(str(tokenizer_path))

        vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())

        tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
        scores: list[float] = [-10000.0] * vocab_size
        toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size

        for token_id in range(tokenizer.vocab_size()):
            piece = tokenizer.IdToPiece(token_id)
            text = piece.encode("utf-8")
            score = tokenizer.GetScore(token_id)

            toktype = SentencePieceTokenTypes.NORMAL
            if tokenizer.IsUnknown(token_id):
                toktype = SentencePieceTokenTypes.UNKNOWN
            elif tokenizer.IsControl(token_id):
                toktype = SentencePieceTokenTypes.CONTROL
            elif tokenizer.IsUnused(token_id):
                toktype = SentencePieceTokenTypes.UNUSED
            elif tokenizer.IsByte(token_id):
                toktype = SentencePieceTokenTypes.BYTE

            tokens[token_id] = text
            scores[token_id] = score
            toktypes[token_id] = toktype

        # Use the added_tokens_decoder field from tokenizer_config.json as the source
        # of information about added/redefined tokens and modify them accordingly.
        tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
        if tokenizer_config_file.is_file():
            with open(tokenizer_config_file, "r", encoding="utf-8") as f:
                tokenizer_config_json = json.load(f)

                if "added_tokens_decoder" in tokenizer_config_json:
                    added_tokens_decoder = tokenizer_config_json["added_tokens_decoder"]
                    for token_id, token_json in added_tokens_decoder.items():
                        token_id = int(token_id)
                        if token_id >= vocab_size:
                            logger.debug(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
                            continue

                        token_content = token_json["content"]
                        token_type = SentencePieceTokenTypes.USER_DEFINED
                        token_score = -10000.0

                        # Map unk_token to UNKNOWN, other special tokens to CONTROL
                        # Set the score to 0.0 as in the original tokenizer.model
                        if ("special" in token_json) and token_json["special"]:
                            if token_content == tokenizer_config_json["unk_token"]:
                                token_type = SentencePieceTokenTypes.UNKNOWN
                            else:
                                token_type = SentencePieceTokenTypes.CONTROL
                            token_score = 0.0

                        logger.info(f"Setting added token {token_id} to '{token_content}' (type: {token_type}, score: {token_score:.2f})")
                        tokens[token_id] = token_content.encode("utf-8")
                        toktypes[token_id] = token_type
                        scores[token_id] = token_score

        self.gguf_writer.add_tokenizer_model("llama")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        hparams = self.hparams
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])
        self.gguf_writer.add_rope_dimension_count(hparams["hidden_size"] // hparams["num_attention_heads"])

    _experts: list[dict[str, Tensor]] | None = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        n_head = self.hparams["num_attention_heads"]
        n_kv_head = self.hparams.get("num_key_value_heads")

        if name.endswith("q_proj.weight"):
            data_torch = LlamaModel.permute(data_torch, n_head, n_head)
        if name.endswith("k_proj.weight"):
            data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)

        # process the experts separately
        if name.find("block_sparse_moe.experts") != -1:
            n_experts = self.hparams["num_local_experts"]

            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for wid in ["w1", "w2", "w3"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.{wid}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)

                    merged_name = f"layers.{bid}.feed_forward.experts.{wid}.weight"

                    new_name = self.map_tensor_name(merged_name)

                    tensors.append((new_name, data_torch))
                return tensors
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")

@Model.register("DeepseekV2ForCausalLM")
class DeepseekV2Model(Model):
    model_arch = gguf.MODEL_ARCH.DEEPSEEK2

    def set_vocab(self):
        self._set_vocab_gpt2()

    def set_gguf_parameters(self):
        super().set_gguf_parameters()

        hparams = self.hparams

        self.gguf_writer.add_leading_dense_block_count(hparams["first_k_dense_replace"])
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])
        if "q_lora_rank" in hparams and hparams["q_lora_rank"] is not None:
            self.gguf_writer.add_q_lora_rank(hparams["q_lora_rank"])
        self.gguf_writer.add_kv_lora_rank(hparams["kv_lora_rank"])
        self.gguf_writer.add_key_length(hparams["qk_nope_head_dim"] + hparams["qk_rope_head_dim"])
        self.gguf_writer.add_value_length(hparams["v_head_dim"])
        self.gguf_writer.add_expert_feed_forward_length(hparams["moe_intermediate_size"])
        self.gguf_writer.add_expert_count(hparams["n_routed_experts"])
        self.gguf_writer.add_expert_shared_count(hparams["n_shared_experts"])
        self.gguf_writer.add_expert_weights_scale(hparams["routed_scaling_factor"])
        self.gguf_writer.add_rope_dimension_count(hparams["qk_rope_head_dim"])

        if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]:
            if self.hparams["rope_scaling"].get("type") == "yarn":
                self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
                self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"])
                self.gguf_writer.add_rope_scaling_orig_ctx_len(self.hparams["rope_scaling"]["original_max_position_embeddings"])
                self.gguf_writer.add_rope_scaling_yarn_log_mul(0.1 * hparams["rope_scaling"]["mscale_all_dim"])

    _experts: list[dict[str, Tensor]] | None = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # process the experts separately
        if name.find("mlp.experts") != -1:
            n_experts = self.hparams["n_routed_experts"]
            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for w_name in ["down_proj", "gate_proj", "up_proj"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)

                    merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"

                    new_name = self.map_tensor_name(merged_name)

                    tensors.append((new_name, data_torch))

                return tensors
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")

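# Illustrative sketch (not part of the converter): DeepSeek-V2's MLA attention
# splits each key into a non-rotary and a rotary part, so the key length written
# above is their sum. The dimensions are assumed, V2-Lite-like values.
def _example_deepseek_head_dims() -> tuple[int, int]:
    qk_nope_head_dim, qk_rope_head_dim, v_head_dim = 128, 64, 128  # assumed
    return qk_nope_head_dim + qk_rope_head_dim, v_head_dim         # (192, 128)
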
@Model.register("T5WithLMHeadModel")
@Model.register("T5ForConditionalGeneration")
@Model.register("MT5ForConditionalGeneration")
@Model.register("UMT5ForConditionalGeneration")
class T5Model(Model):
    model_arch = gguf.MODEL_ARCH.T5

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.shared_token_embeddings_found = False

    def set_vocab(self):
        # to avoid TypeError: Descriptors cannot be created directly
        # exception when importing sentencepiece_model_pb2
        os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
        from sentencepiece import SentencePieceProcessor
        from sentencepiece import sentencepiece_model_pb2 as model

        tokenizer_path = self.dir_model / 'tokenizer.model'

        # many older models use the spiece.model filename for the tokenizer model
        if not tokenizer_path.is_file():
            tokenizer_path = self.dir_model / 'spiece.model'

        if not tokenizer_path.is_file():
            raise FileNotFoundError(f"File not found: {tokenizer_path}")

        sentencepiece_model = model.ModelProto()  # pyright: ignore[reportAttributeAccessIssue]
        sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read())

        # some models like Pile-T5 family use BPE tokenizer instead of Unigram
        if sentencepiece_model.trainer_spec.model_type == 2:  # BPE
            # make sure the tokenizer model file name is correct
            assert tokenizer_path.name == 'tokenizer.model'
            return self._set_vocab_sentencepiece()
        else:
            assert sentencepiece_model.trainer_spec.model_type == 1  # UNIGRAM

        add_prefix = sentencepiece_model.normalizer_spec.add_dummy_prefix
        remove_whitespaces = sentencepiece_model.normalizer_spec.remove_extra_whitespaces
        precompiled_charsmap = sentencepiece_model.normalizer_spec.precompiled_charsmap

        tokenizer = SentencePieceProcessor()
        tokenizer.LoadFromFile(str(tokenizer_path))

        vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())

        tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
        scores: list[float] = [-10000.0] * vocab_size
        toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size

        for token_id in range(tokenizer.vocab_size()):
            piece = tokenizer.IdToPiece(token_id)
            text = piece.encode("utf-8")
            score = tokenizer.GetScore(token_id)

            toktype = SentencePieceTokenTypes.NORMAL
            if tokenizer.IsUnknown(token_id):
                toktype = SentencePieceTokenTypes.UNKNOWN
            elif tokenizer.IsControl(token_id):
                toktype = SentencePieceTokenTypes.CONTROL
            elif tokenizer.IsUnused(token_id):
                toktype = SentencePieceTokenTypes.UNUSED
            elif tokenizer.IsByte(token_id):
                toktype = SentencePieceTokenTypes.BYTE

            tokens[token_id] = text
            scores[token_id] = score
            toktypes[token_id] = toktype

        added_tokens_file = self.dir_model / 'added_tokens.json'
        if added_tokens_file.is_file():
            with open(added_tokens_file, "r", encoding="utf-8") as f:
                added_tokens_json = json.load(f)
                for key in added_tokens_json:
                    token_id = added_tokens_json[key]
                    if token_id >= vocab_size:
                        logger.warning(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
                        continue

                    tokens[token_id] = key.encode("utf-8")
                    scores[token_id] = -1000.0
                    toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED

        if vocab_size > len(tokens):
            pad_count = vocab_size - len(tokens)
            logger.debug(f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]")
            for i in range(1, pad_count + 1):
                tokens.append(bytes(f"[PAD{i}]", encoding="utf-8"))
                scores.append(-1000.0)
                toktypes.append(SentencePieceTokenTypes.UNUSED)

        self.gguf_writer.add_tokenizer_model("t5")
        self.gguf_writer.add_tokenizer_pre("default")
  2882. self.gguf_writer.add_token_list(tokens)
  2883. self.gguf_writer.add_token_scores(scores)
  2884. self.gguf_writer.add_token_types(toktypes)
  2885. self.gguf_writer.add_add_space_prefix(add_prefix)
  2886. self.gguf_writer.add_remove_extra_whitespaces(remove_whitespaces)
  2887. if precompiled_charsmap:
  2888. self.gguf_writer.add_precompiled_charsmap(precompiled_charsmap)
  2889. special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
  2890. special_vocab.add_to_gguf(self.gguf_writer)
  2891. self.gguf_writer.add_add_bos_token(False)
  2892. self.gguf_writer.add_add_eos_token(True)
  2893. def set_gguf_parameters(self):
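        # T5 uses relative attention buckets instead of absolute positions, so
        # config.json frequently has no n_positions entry; 512 matches the input
        # length the original T5 checkpoints were trained with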
        if (n_ctx := self.find_hparam(["n_positions"], optional=True)) is None:
            logger.warning("Couldn't find context length in config.json, assuming default value of 512")
            n_ctx = 512
        self.gguf_writer.add_context_length(n_ctx)
        self.gguf_writer.add_embedding_length(self.hparams["d_model"])
        self.gguf_writer.add_feed_forward_length(self.hparams["d_ff"])
        self.gguf_writer.add_block_count(self.hparams["num_layers"])
        self.gguf_writer.add_head_count(self.hparams["num_heads"])
        self.gguf_writer.add_key_length(self.hparams["d_kv"])
        self.gguf_writer.add_value_length(self.hparams["d_kv"])
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_relative_attn_buckets_count(self.hparams["relative_attention_num_buckets"])
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_decoder_start_token_id(self.hparams["decoder_start_token_id"])
        self.gguf_writer.add_file_type(self.ftype)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        # T5-based models store the shared token embeddings tensor under varying names:
        # "encoder.embed_tokens.weight", "decoder.embed_tokens.weight" or "shared.weight".
        # Some checkpoints even contain several copies of it in the safetensors files.
        # We use the first of these tensors as the token embeddings for both encoder
        # and decoder and ignore the remaining ones.
        if name in ["decoder.embed_tokens.weight", "encoder.embed_tokens.weight", "shared.weight"]:
            if not self.shared_token_embeddings_found:
                name = "shared.weight"
                self.shared_token_embeddings_found = True
            else:
                logger.debug(f"Skipping shared tensor {name!r} in safetensors so that convert can end normally.")
                return []

        return [(self.map_tensor_name(name), data_torch)]


@Model.register("T5EncoderModel")
class T5EncoderModel(Model):
    model_arch = gguf.MODEL_ARCH.T5ENCODER

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.shared_token_embeddings_found = False

    def set_vocab(self):
        # to avoid TypeError: Descriptors cannot be created directly
        # exception when importing sentencepiece_model_pb2
        os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
        from sentencepiece import SentencePieceProcessor
        from sentencepiece import sentencepiece_model_pb2 as model

        tokenizer_path = self.dir_model / 'tokenizer.model'

        # many older models use the spiece.model file name for the tokenizer model
        if not tokenizer_path.is_file():
            tokenizer_path = self.dir_model / 'spiece.model'

        if not tokenizer_path.is_file():
            raise FileNotFoundError(f"File not found: {tokenizer_path}")

        sentencepiece_model = model.ModelProto()  # pyright: ignore[reportAttributeAccessIssue]
        sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read())

        # some models like Pile-T5 family use BPE tokenizer instead of Unigram
        if sentencepiece_model.trainer_spec.model_type == 2:  # BPE
            # ensure the tokenizer model file name is correct
            assert tokenizer_path.name == 'tokenizer.model'
            return self._set_vocab_sentencepiece()
        else:
            assert sentencepiece_model.trainer_spec.model_type == 1  # UNIGRAM

        add_prefix = sentencepiece_model.normalizer_spec.add_dummy_prefix
        remove_whitespaces = sentencepiece_model.normalizer_spec.remove_extra_whitespaces
        precompiled_charsmap = sentencepiece_model.normalizer_spec.precompiled_charsmap

        tokenizer = SentencePieceProcessor()
        tokenizer.LoadFromFile(str(tokenizer_path))

        vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())

        tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
        scores: list[float] = [-10000.0] * vocab_size
        toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size

        for token_id in range(tokenizer.vocab_size()):
            piece = tokenizer.IdToPiece(token_id)
            text = piece.encode("utf-8")
            score = tokenizer.GetScore(token_id)

            toktype = SentencePieceTokenTypes.NORMAL
            if tokenizer.IsUnknown(token_id):
                toktype = SentencePieceTokenTypes.UNKNOWN
            elif tokenizer.IsControl(token_id):
                toktype = SentencePieceTokenTypes.CONTROL
            elif tokenizer.IsUnused(token_id):
                toktype = SentencePieceTokenTypes.UNUSED
            elif tokenizer.IsByte(token_id):
                toktype = SentencePieceTokenTypes.BYTE

            tokens[token_id] = text
            scores[token_id] = score
            toktypes[token_id] = toktype

        added_tokens_file = self.dir_model / 'added_tokens.json'
        if added_tokens_file.is_file():
            with open(added_tokens_file, "r", encoding="utf-8") as f:
                added_tokens_json = json.load(f)
                for key in added_tokens_json:
                    token_id = added_tokens_json[key]
                    if token_id >= vocab_size:
                        logger.warning(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
                        continue

                    tokens[token_id] = key.encode("utf-8")
                    scores[token_id] = -1000.0
                    toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED

        if vocab_size > len(tokens):
            pad_count = vocab_size - len(tokens)
            logger.debug(f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]")
            for i in range(1, pad_count + 1):
                tokens.append(bytes(f"[PAD{i}]", encoding="utf-8"))
                scores.append(-1000.0)
                toktypes.append(SentencePieceTokenTypes.UNUSED)

        self.gguf_writer.add_tokenizer_model("t5")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)
        self.gguf_writer.add_add_space_prefix(add_prefix)
        self.gguf_writer.add_remove_extra_whitespaces(remove_whitespaces)
        if precompiled_charsmap:
            self.gguf_writer.add_precompiled_charsmap(precompiled_charsmap)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

        self.gguf_writer.add_add_bos_token(False)
        self.gguf_writer.add_add_eos_token(True)

    def set_gguf_parameters(self):
        if (n_ctx := self.find_hparam(["n_positions"], optional=True)) is None:
            logger.warning("Couldn't find context length in config.json, assuming default value of 512")
            n_ctx = 512
        self.gguf_writer.add_context_length(n_ctx)
        self.gguf_writer.add_embedding_length(self.hparams["d_model"])
        self.gguf_writer.add_feed_forward_length(self.hparams["d_ff"])
        self.gguf_writer.add_block_count(self.hparams["num_layers"])
        self.gguf_writer.add_head_count(self.hparams["num_heads"])
        self.gguf_writer.add_key_length(self.hparams["d_kv"])
        self.gguf_writer.add_value_length(self.hparams["d_kv"])
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_relative_attn_buckets_count(self.hparams["relative_attention_num_buckets"])
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        # T5-based models store the shared token embeddings tensor under varying names:
        # "encoder.embed_tokens.weight", "decoder.embed_tokens.weight" or "shared.weight".
        # Some checkpoints even contain several copies of it in the safetensors files.
        # We use the first of these tensors as the token embeddings for both encoder
        # and decoder and ignore the remaining ones.
        if name in ["decoder.embed_tokens.weight", "encoder.embed_tokens.weight", "shared.weight"]:
            if not self.shared_token_embeddings_found:
                name = "shared.weight"
                self.shared_token_embeddings_found = True
            else:
                logger.debug(f"Skipping shared tensor {name!r} in safetensors so that convert can end normally.")
                return []

        return [(self.map_tensor_name(name), data_torch)]


@Model.register("JAISLMHeadModel")
class JaisModel(Model):
    model_arch = gguf.MODEL_ARCH.JAIS

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # SwiGLU activation
        assert self.hparams["activation_function"] == "swiglu"
        # ALiBi position embedding
        assert self.hparams["position_embedding_type"] == "alibi"

        # Embeddings scale
        self.embeddings_scale = 1.0
        if 'mup_embeddings_scale' in self.hparams:
            self.embeddings_scale = self.hparams['mup_embeddings_scale']
        elif 'embeddings_scale' in self.hparams:
            self.embeddings_scale = self.hparams['embeddings_scale']
        else:
            assert False

        self.width_scale = 1.0
        if 'mup_output_alpha' in self.hparams:
            assert 'mup_width_scale' in self.hparams
            self.width_scale = self.hparams['mup_output_alpha'] * self.hparams['mup_width_scale']
        elif 'width_scale' in self.hparams:
            self.width_scale = self.hparams['width_scale']
        else:
            assert False

        self.max_alibi_bias = 8.0

    def set_vocab(self):
        self._set_vocab_gpt2()

    def set_gguf_parameters(self):
        self.gguf_writer.add_block_count(self.hparams["n_layer"])
        self.gguf_writer.add_context_length(self.hparams["n_positions"])
        self.gguf_writer.add_embedding_length(self.hparams["n_embd"])
        self.gguf_writer.add_feed_forward_length(self.hparams["n_inner"])
        self.gguf_writer.add_head_count(self.hparams["n_head"])
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        tensors: list[tuple[str, Tensor]] = []

        # we don't need these
        if name.endswith(".attn.bias"):
            return tensors

        if name.endswith("relative_pe.slopes"):
            # Calculate the max ALiBi bias (this is the inverse of the ALiBi slope calculation).
            # Some other models have max_alibi_bias spelled out explicitly in their hyperparams,
            # but Jais's PyTorch model simply precalculates the slope values and places them
            # in relative_pe.slopes
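            # Worked example: with the standard ALiBi slopes 2**(-8*i/n) for
            # i = 1..n and n_head = 32, the first slope is 2**(-8/32), so
            # -log2(2**-0.25) * 32 = 8.0, recovering the usual max bias of 8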
            n_head_closest_log2 = 2 ** math.floor(math.log2(self.hparams["n_head"]))
            first_val = float(data_torch[0].item())
            self.max_alibi_bias = -round(math.log2(first_val) * n_head_closest_log2)

            return tensors

        if name.endswith((".c_attn.weight", ".c_proj.weight", ".c_fc.weight", ".c_fc2.weight")):
            data_torch = data_torch.transpose(1, 0)

        new_name = self.map_tensor_name(name)

        if new_name == self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD):
            tensors.append((new_name, data_torch * self.embeddings_scale))
        elif new_name == self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT):
            tensors.append((new_name, data_torch * self.width_scale))
        else:
            tensors.append((new_name, data_torch))

        return tensors

    def prepare_tensors(self):
        super().prepare_tensors()
        self.gguf_writer.add_max_alibi_bias(self.max_alibi_bias)


@Model.register("ChatGLMModel", "ChatGLMForConditionalGeneration")
class ChatGLMModel(Model):
    model_arch = gguf.MODEL_ARCH.CHATGLM

    def set_vocab_chatglm3(self):
        dir_model = self.dir_model
        hparams = self.hparams
        tokens: list[bytes] = []
        toktypes: list[int] = []
        scores: list[float] = []

        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(dir_model, trust_remote_code=True)
        vocab_size = hparams.get("padded_vocab_size", len(tokenizer.get_vocab()))
        assert max(tokenizer.get_vocab().values()) < vocab_size
        role_special_tokens = ["<|system|>", "<|user|>", "<|assistant|>", "<|observation|>"]
        special_tokens = ["[MASK]", "[gMASK]", "[sMASK]", "sop", "eop"] + role_special_tokens
        for token_id in range(vocab_size):
            piece = tokenizer._convert_id_to_token(token_id)
            if token_id == 0:
                piece = "<unk>"
            elif token_id == 1:
                piece = "<bos>"
            elif token_id == 2:
                piece = "<eos>"

            text = piece.encode("utf-8")
            score = 0.0
            # Referencing the tokenizer's Python implementation
            # (https://huggingface.co/THUDM/chatglm3-6b/blob/main/tokenization_chatglm.py),
            # a score is only valid for token ids below tokenizer.tokenizer.sp_model.vocab_size()
            if len(piece) != 0 and token_id < tokenizer.tokenizer.sp_model.vocab_size():
                score = tokenizer.tokenizer.sp_model.get_score(token_id)

            if token_id >= tokenizer.tokenizer.sp_model.vocab_size():
                if piece in special_tokens:
                    toktype = SentencePieceTokenTypes.CONTROL
                elif len(piece) == 0:
                    text = f"[PAD{token_id}]".encode("utf-8")
                    toktype = SentencePieceTokenTypes.UNUSED
                else:
                    toktype = SentencePieceTokenTypes.USER_DEFINED
                tokens.append(text)
                scores.append(score)
                toktypes.append(toktype)
                continue

            toktype = SentencePieceTokenTypes.NORMAL
            if tokenizer.tokenizer.sp_model.is_unknown(token_id):
                toktype = SentencePieceTokenTypes.UNKNOWN
            elif tokenizer.tokenizer.sp_model.is_control(token_id):
                toktype = SentencePieceTokenTypes.CONTROL
            elif tokenizer.tokenizer.sp_model.is_unused(token_id):
                toktype = SentencePieceTokenTypes.UNUSED
            elif tokenizer.tokenizer.sp_model.is_byte(token_id):
                toktype = SentencePieceTokenTypes.BYTE

            tokens.append(text)
            scores.append(score)
            toktypes.append(toktype)

        self.gguf_writer.add_tokenizer_model("llama")
        # glm3 needs prefix and suffix formatted as:
        # prompt = "[gMASK]sop<|user|>\n" + prompt + "<|assistant|>"
        self.gguf_writer.add_tokenizer_pre("chatglm-spm")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

    @staticmethod
    def token_bytes_to_string(b):
        from transformers.models.gpt2.tokenization_gpt2 import bytes_to_unicode
        byte_encoder = bytes_to_unicode()
        return ''.join([byte_encoder[ord(char)] for char in b.decode('latin-1')])

    @staticmethod
    def bpe(mergeable_ranks: dict[bytes, int], token: bytes, max_rank: int | None = None) -> list[bytes]:
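        # Greedy BPE re-derivation: start from single bytes and repeatedly merge
        # the adjacent pair with the lowest rank. Called with max_rank set to a
        # token's own rank, merging stops just before the token itself would be
        # formed, so the returned parts are the operands of its final merge;
        # set_vocab() uses this to rebuild the merges list from mergeable_ranks.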
        parts = [bytes([b]) for b in token]
        while True:
            min_idx = None
            min_rank = None
            for i, pair in enumerate(zip(parts[:-1], parts[1:])):
                rank = mergeable_ranks.get(pair[0] + pair[1])
                if rank is not None and (min_rank is None or rank < min_rank):
                    min_idx = i
                    min_rank = rank
            if min_rank is None or (max_rank is not None and min_rank >= max_rank):
                break
            assert min_idx is not None
            parts = parts[:min_idx] + [parts[min_idx] + parts[min_idx + 1]] + parts[min_idx + 2:]
        return parts

    def set_vocab(self):
        if "THUDM/chatglm3-6b" in self.hparams.get("_name_or_path", ""):
            self.set_vocab_chatglm3()
            return

        dir_model = self.dir_model
        hparams = self.hparams
        tokens: list[str] = []
        toktypes: list[int] = []

        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(dir_model, trust_remote_code=True)
        vocab_size = hparams["padded_vocab_size"]
        assert max(tokenizer.get_vocab().values()) < vocab_size

        tokpre = self.get_vocab_base_pre(tokenizer)

        merges = []
        vocab = {}
        mergeable_ranks = tokenizer.mergeable_ranks
        for token, rank in mergeable_ranks.items():
            vocab[ChatGLMModel.token_bytes_to_string(token)] = rank
            if len(token) == 1:
                continue
            merged = ChatGLMModel.bpe(mergeable_ranks, token, max_rank=rank)
            assert 2 <= len(merged) <= 7
            merges.append(' '.join(map(ChatGLMModel.token_bytes_to_string, merged)))

        # for this kind of tokenizer, added_vocab is not a subset of vocab, so they need to be combined
        added_vocab = tokenizer.get_added_vocab()
        reverse_vocab = {id_: encoded_tok for encoded_tok, id_ in {**vocab, **added_vocab}.items()}

        for i in range(vocab_size):
            if i not in reverse_vocab:
                tokens.append(f"[PAD{i}]")
                toktypes.append(gguf.TokenType.UNUSED)
            elif reverse_vocab[i] in added_vocab:
                tokens.append(reverse_vocab[i])
                if tokenizer.added_tokens_decoder[i].special:
                    toktypes.append(gguf.TokenType.CONTROL)
                else:
                    toktypes.append(gguf.TokenType.USER_DEFINED)
            else:
                tokens.append(reverse_vocab[i])
                toktypes.append(gguf.TokenType.NORMAL)

        self.gguf_writer.add_tokenizer_model("gpt2")
        self.gguf_writer.add_tokenizer_pre(tokpre)
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(dir_model, load_merges=False)
        special_vocab.merges = merges
        # only add special tokens when they were not already loaded from config.json
        special_vocab._set_special_token("eos", tokenizer.get_added_vocab()["<|endoftext|>"])
        special_vocab._set_special_token("eot", tokenizer.get_added_vocab()["<|user|>"])
        # this one is usually not in config.json anyway
        special_vocab._set_special_token("unk", tokenizer.get_added_vocab()["<|endoftext|>"])
        special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed"))
        n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads"))
        n_head_kv = self.hparams.get("multi_query_group_num", n_head)
        self.gguf_writer.add_context_length(self.hparams.get("seq_length", n_embed))
        self.gguf_writer.add_embedding_length(n_embed)
        self.gguf_writer.add_feed_forward_length(self.hparams.get("ffn_hidden_size", 4 * n_embed))
        self.gguf_writer.add_block_count(self.hparams["num_layers"])
        self.gguf_writer.add_head_count(n_head)
        self.gguf_writer.add_head_count_kv(n_head_kv)
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layernorm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)
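        # GLM applies rotary embeddings to only part of each attention head
        # (presumably 64 of the 128 head dimensions in the released models),
        # hence the hard-coded rotary dimension count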
        self.gguf_writer.add_rope_dimension_count(64)
        self.gguf_writer.add_add_bos_token(False)
        rope_freq = 10000
        if "rope_ratio" in self.hparams:
            rope_freq = rope_freq * self.hparams["rope_ratio"]
        self.gguf_writer.add_rope_freq_base(rope_freq)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        if name.endswith(".rotary_pos_emb.inv_freq"):
            return []

        name = name.removeprefix("transformer.")
        return [(self.map_tensor_name(name), data_torch)]


@Model.register("NemotronForCausalLM")
class NemotronModel(Model):
    model_arch = gguf.MODEL_ARCH.NEMOTRON

    def set_vocab(self):
        self._set_vocab_sentencepiece()
        self.gguf_writer.add_pad_token_id(0)
        self.gguf_writer.add_unk_token_id(1)

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        hparams = self.hparams
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])

        f_norm_eps = self.find_hparam(["layer_norm_eps", "layer_norm_epsilon", "norm_epsilon", "norm_eps"])
        self.gguf_writer.add_layer_norm_eps(f_norm_eps)

        # * Partial RoPE
        rot_pct = self.find_hparam(["partial_rotary_factor", "rope_pct", "rope_percent"])
        n_embd = self.find_hparam(["hidden_size", "n_embd"])
        n_head = self.find_hparam(["num_attention_heads", "n_head"])
        self.gguf_writer.add_rope_dimension_count(int(rot_pct * n_embd) // n_head)

        # * RopeScaling for Nemotron
        if "rope_scaling" not in self.hparams or self.hparams["rope_scaling"] is None:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)
        else:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
            self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # * Adding +1 to LayerNorm's weights here to implement layernorm1p without
        #   changing anything on the GGML engine side:
        #   model.layers.{l}.input_layernorm.weight
        #   model.layers.{l}.post_attention_layernorm.weight
        #   model.norm.weight
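        # layernorm1p computes y = norm(x) * (1 + weight) + bias, so folding the
        # +1 into the stored weight lets the runtime apply a plain LayerNorm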
        if name.endswith("norm.weight"):
            data_torch = data_torch + 1

        return [(self.map_tensor_name(name), data_torch)]


@Model.register("ExaoneForCausalLM")
class ExaoneModel(Model):
    model_arch = gguf.MODEL_ARCH.EXAONE

    def set_gguf_parameters(self):
        hparams = self.hparams

        assert hparams["activation_function"] == "silu"

        max_position_embeddings = hparams["max_position_embeddings"]
        embed_dim = hparams["hidden_size"]
        num_heads = hparams["num_attention_heads"]
        num_kv_heads = hparams.get("num_key_value_heads", num_heads)
        layer_norm_eps = hparams["layer_norm_epsilon"]
        intermediate_size = hparams["intermediate_size"] if "intermediate_size" in hparams else 4 * embed_dim
        num_layers = hparams["num_layers"]
        # ignore for now as EXAONE-3.0-7.8B-Instruct attention_dropout is 0.0
        # attention_dropout_rate = hparams["attention_dropout"]
        # ignore for now as EXAONE-3.0-7.8B-Instruct embed_dropout is 0.0
        # embed_dropout_rate = hparams["embed_dropout"]
        self.gguf_writer.add_embedding_length(embed_dim)
        self.gguf_writer.add_head_count(num_heads)
        self.gguf_writer.add_head_count_kv(num_kv_heads)
        self.gguf_writer.add_context_length(max_position_embeddings)
        self.gguf_writer.add_layer_norm_rms_eps(layer_norm_eps)
        self.gguf_writer.add_feed_forward_length(intermediate_size)
        self.gguf_writer.add_block_count(num_layers)
        self.gguf_writer.add_file_type(self.ftype)

        if (rope_theta := self.hparams.get("rope_theta")) is not None:
            self.gguf_writer.add_rope_freq_base(rope_theta)
        rotary_factor = self.find_hparam(["partial_rotary_factor", "rope_pct"], optional=True)
        rotary_factor = rotary_factor if rotary_factor is not None else 1.0
        self.gguf_writer.add_rope_dimension_count(int(rotary_factor * (hparams["hidden_size"] // hparams["num_attention_heads"])))
        if hparams.get("rope_scaling") is not None and "factor" in hparams["rope_scaling"]:
            if hparams["rope_scaling"].get("type") == "linear":
                self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
                self.gguf_writer.add_rope_scaling_factor(hparams["rope_scaling"]["factor"])

    def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
        if rope_scaling := self.find_hparam(["rope_scaling"], optional=True):
            if rope_scaling.get("rope_type", '').lower() == "llama3":
                base = self.hparams.get("rope_theta", 10000.0)
                dim = self.hparams.get("head_dim", self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
                freqs = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim))

                factor = rope_scaling.get("factor", 8.0)
                low_freq_factor = rope_scaling.get("low_freq_factor", 1.0)
                high_freq_factor = rope_scaling.get("high_freq_factor", 4.0)
                old_context_len = self.hparams.get("original_max_position_embeddings", 8192)

                low_freq_wavelen = old_context_len / low_freq_factor
                high_freq_wavelen = old_context_len / high_freq_factor
                assert low_freq_wavelen != high_freq_wavelen
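
                # Llama-3-style frequency scaling: components whose wavelength fits
                # well inside the original context (wavelen < high_freq_wavelen) are
                # left untouched, components with wavelen > low_freq_wavelen are
                # slowed down by the full factor, and those in between get a smooth
                # blend of the two regimes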
                rope_factors = []
                for freq in freqs:
                    wavelen = 2 * math.pi / freq
                    if wavelen < high_freq_wavelen:
                        rope_factors.append(1)
                    elif wavelen > low_freq_wavelen:
                        rope_factors.append(factor)
                    else:
                        smooth = (old_context_len / wavelen - low_freq_factor) / (high_freq_factor - low_freq_factor)
                        rope_factors.append(1 / ((1 - smooth) / factor + smooth))

                yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FREQS), torch.tensor(rope_factors, dtype=torch.float32))


@Model.register("GraniteForCausalLM")
class GraniteModel(LlamaModel):
    """Conversion for IBM's GraniteForCausalLM"""
    model_arch = gguf.MODEL_ARCH.GRANITE

    def set_gguf_parameters(self):
        """Granite uses standard llama parameters with the following differences:
        - No head_dim support
        - New multiplier params:
          - attention_scale
          - embedding_scale
          - residual_scale
          - logits_scaling
        """
        if head_dim := self.hparams.pop("head_dim", None):
            logger.warning("Ignoring head_dim (%s) from config for Granite", head_dim)
        super().set_gguf_parameters()

        # NOTE: Convert _multiplier params to _scale params for naming consistency
        if attention_scale := self.hparams.get("attention_multiplier"):
            self.gguf_writer.add_attention_scale(attention_scale)
            logger.info("gguf: (granite) attention_scale = %s", attention_scale)
        if embedding_scale := self.hparams.get("embedding_multiplier"):
            self.gguf_writer.add_embedding_scale(embedding_scale)
            logger.info("gguf: (granite) embedding_scale = %s", embedding_scale)
        if residual_scale := self.hparams.get("residual_multiplier"):
            self.gguf_writer.add_residual_scale(residual_scale)
            logger.info("gguf: (granite) residual_scale = %s", residual_scale)
        if logits_scale := self.hparams.get("logits_scaling"):
            self.gguf_writer.add_logit_scale(logits_scale)
            logger.info("gguf: (granite) logits_scale = %s", logits_scale)


@Model.register("GraniteMoeForCausalLM")
class GraniteMoeModel(GraniteModel):
    """Conversion for IBM's GraniteMoeForCausalLM"""
    model_arch = gguf.MODEL_ARCH.GRANITE_MOE

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        """In modeling_granitemoe, the JetMoe implementation of parallel experts
        is used. This essentially merges w1 and w3 into a single tensor with 2x
        the hidden size that is then split during forward. To keep compatibility
        with existing mixtral support, we pull them apart here.
        """
        if name.endswith("block_sparse_moe.input_linear.weight"):
            ffn_dim = self.hparams["intermediate_size"]
            assert data_torch.shape[-2] == 2 * ffn_dim, "Merged FFN tensor size must be 2 * intermediate_size"
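            # rows [0, ffn_dim) hold the gate projection (w1) and rows
            # [ffn_dim, 2 * ffn_dim) the up projection (w3), matching the split
            # done in the modeling code referenced above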
            gate, up = data_torch[..., :ffn_dim, :], data_torch[..., ffn_dim:, :]
            return [
                (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE_EXP, bid), gate),
                (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP_EXP, bid), up),
            ]

        return super().modify_tensors(data_torch, name, bid)


@Model.register("ChameleonForConditionalGeneration")
@Model.register("ChameleonForCausalLM")  # obsolete
class ChameleonModel(Model):
    model_arch = gguf.MODEL_ARCH.CHAMELEON

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_swin_norm(self.hparams.get("swin_norm", False))

    def set_vocab(self):
        self._set_vocab_gpt2()

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # ignore image tokenizer for now
        # TODO: remove this once image support is implemented for Chameleon
        if name.startswith("model.vqmodel"):
            return []

        n_head = self.hparams["num_attention_heads"]
        n_kv_head = self.hparams.get("num_key_value_heads")
        hidden_dim = self.hparams.get("hidden_size")

        if name.endswith(("q_proj.weight", "q_proj.bias")):
            data_torch = LlamaModel.permute(data_torch, n_head, n_head)
        if name.endswith(("k_proj.weight", "k_proj.bias")):
            data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)
        if name.endswith(("q_norm.weight", "q_norm.bias")):
            data_torch = ChameleonModel._reverse_hf_permute(data_torch, n_head, hidden_dim)
        if name.endswith(("k_norm.weight", "k_norm.bias")):
            data_torch = ChameleonModel._reverse_hf_permute(data_torch, n_kv_head, hidden_dim)

        return [(self.map_tensor_name(name), data_torch)]

    # see: https://github.com/huggingface/transformers/blob/72fb02c47dbbe1999ae105319f24631cad6e2e00/src/transformers/models/chameleon/convert_chameleon_weights_to_hf.py#L176-L203
    @staticmethod
    def _reverse_hf_permute(data_torch, n_heads, hidden_dim):
        head_dim = hidden_dim // n_heads
        data_torch = data_torch[0].view(2, head_dim // 2).t().reshape(1, -1)
        data_torch = data_torch.repeat_interleave(n_heads, 0)
        return data_torch


###### CONVERSION LOGIC ######


# tree of lazy tensors
class LazyTorchTensor(gguf.LazyBase):
    _tensor_type = torch.Tensor

    # to keep the type-checker happy
    dtype: torch.dtype
    shape: torch.Size

    # only used when converting a torch.Tensor to a np.ndarray
    _dtype_map: dict[torch.dtype, type] = {
        torch.float16: np.float16,
        torch.float32: np.float32,
    }

    # used for safetensors slices
    # ref: https://github.com/huggingface/safetensors/blob/079781fd0dc455ba0fe851e2b4507c33d0c0d407/bindings/python/src/lib.rs#L1046
    # TODO: uncomment U64, U32, and U16, ref: https://github.com/pytorch/pytorch/issues/58734
    _dtype_str_map: dict[str, torch.dtype] = {
        "F64": torch.float64,
        "F32": torch.float32,
        "BF16": torch.bfloat16,
        "F16": torch.float16,
        # "U64": torch.uint64,
        "I64": torch.int64,
        # "U32": torch.uint32,
        "I32": torch.int32,
        # "U16": torch.uint16,
        "I16": torch.int16,
        "U8": torch.uint8,
        "I8": torch.int8,
        "BOOL": torch.bool,
        "F8_E4M3": torch.float8_e4m3fn,
        "F8_E5M2": torch.float8_e5m2,
    }

    def numpy(self) -> gguf.LazyNumpyTensor:
        dtype = self._dtype_map[self.dtype]
        return gguf.LazyNumpyTensor(
            meta=gguf.LazyNumpyTensor.meta_with_dtype_and_shape(dtype, self.shape),
            args=(self,),
            func=(lambda s: s.numpy())
        )

    @classmethod
    def meta_with_dtype_and_shape(cls, dtype: torch.dtype, shape: tuple[int, ...]) -> Tensor:
        return torch.empty(size=shape, dtype=dtype, device="meta")

    @classmethod
    def from_safetensors_slice(cls, st_slice: Any) -> Tensor:
        dtype = cls._dtype_str_map[st_slice.get_dtype()]
        shape: tuple[int, ...] = tuple(st_slice.get_shape())
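        # the actual read (s[:]) is deferred: the meta tensor carries only dtype
        # and shape, so downstream transforms can be recorded without loading any
        # data and are replayed when the lazy graph is finally evaluated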
        lazy = cls(meta=cls.meta_with_dtype_and_shape(dtype, shape), args=(st_slice,), func=lambda s: s[:])
        return cast(torch.Tensor, lazy)

    @classmethod
    def __torch_function__(cls, func, types, args=(), kwargs=None):
        del types  # unused

        if kwargs is None:
            kwargs = {}

        if func is torch.Tensor.numpy:
            return args[0].numpy()

        return cls._wrap_fn(func)(*args, **kwargs)


def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(
        description="Convert a huggingface model to a GGML compatible file")
    parser.add_argument(
        "--vocab-only", action="store_true",
        help="extract only the vocab",
    )
    parser.add_argument(
        "--outfile", type=Path,
        help="path to write to; default: based on input. {ftype} will be replaced by the outtype.",
    )
    parser.add_argument(
        "--outtype", type=str, choices=["f32", "f16", "bf16", "q8_0", "tq1_0", "tq2_0", "auto"], default="f16",
        help="output format - use f32 for float32, f16 for float16, bf16 for bfloat16, q8_0 for Q8_0, tq1_0 or tq2_0 for ternary, and auto for the highest-fidelity 16-bit float type depending on the first loaded tensor type",
    )
    parser.add_argument(
        "--bigendian", action="store_true",
        help="model is executed on big endian machine",
    )
    parser.add_argument(
        "model", type=Path,
        help="directory containing model file",
    )
    parser.add_argument(
        "--use-temp-file", action="store_true",
        help="use the tempfile library while processing (helpful when running out of memory, process killed)",
    )
    parser.add_argument(
        "--no-lazy", action="store_true",
        help="use more RAM by computing all outputs before writing (use in case lazy evaluation is broken)",
    )
    parser.add_argument(
        "--model-name", type=str, default=None,
        help="name of the model",
    )
    parser.add_argument(
        "--verbose", action="store_true",
        help="increase output verbosity",
    )
    parser.add_argument(
        "--split-max-tensors", type=int, default=0,
        help="max tensors in each split",
    )
    parser.add_argument(
        "--split-max-size", type=str, default="0",
        help="max size per split N(M|G)",
    )
    parser.add_argument(
        "--dry-run", action="store_true",
        help="only print out a split plan and exit, without writing any new files",
    )
    parser.add_argument(
        "--no-tensor-first-split", action="store_true",
        help="do not add tensors to the first split (disabled by default)"
    )
    parser.add_argument(
        "--metadata", type=Path,
        help="Specify the path for an authorship metadata override file"
    )

    return parser.parse_args()


def split_str_to_n_bytes(split_str: str) -> int:
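    # sizes use decimal (SI) multipliers, so "1G" is 10**9 bytes rather than 2**30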
    if split_str.endswith("K"):
        n = int(split_str[:-1]) * 1000
    elif split_str.endswith("M"):
        n = int(split_str[:-1]) * 1000 * 1000
    elif split_str.endswith("G"):
        n = int(split_str[:-1]) * 1000 * 1000 * 1000
    elif split_str.isnumeric():
        n = int(split_str)
    else:
        raise ValueError(f"Invalid split size: {split_str}, must be a number, optionally followed by K, M, or G")

    if n < 0:
        raise ValueError(f"Invalid split size: {split_str}, must be positive")

    return n


def main() -> None:
    args = parse_args()

    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)

    dir_model = args.model

    if not dir_model.is_dir():
        logger.error(f'Error: {args.model} is not a directory')
        sys.exit(1)

    ftype_map: dict[str, gguf.LlamaFileType] = {
        "f32": gguf.LlamaFileType.ALL_F32,
        "f16": gguf.LlamaFileType.MOSTLY_F16,
        "bf16": gguf.LlamaFileType.MOSTLY_BF16,
        "q8_0": gguf.LlamaFileType.MOSTLY_Q8_0,
        "tq1_0": gguf.LlamaFileType.MOSTLY_TQ1_0,
        "tq2_0": gguf.LlamaFileType.MOSTLY_TQ2_0,
        "auto": gguf.LlamaFileType.GUESSED,
    }

    is_split = args.split_max_tensors > 0 or args.split_max_size != "0"
    if args.use_temp_file and is_split:
        logger.error("Error: Cannot use temp file when splitting")
        sys.exit(1)

    if args.outfile is not None:
        fname_out = args.outfile
    else:
        fname_out = dir_model

    logger.info(f"Loading model: {dir_model.name}")

    hparams = Model.load_hparams(dir_model)

    with torch.inference_mode():
        output_type = ftype_map[args.outtype]
        model_architecture = hparams["architectures"][0]

        try:
            model_class = Model.from_model_architecture(model_architecture)
        except NotImplementedError:
            logger.error(f"Model {model_architecture} is not supported")
            sys.exit(1)

        model_instance = model_class(dir_model=dir_model, ftype=output_type, fname_out=fname_out,
                                     is_big_endian=args.bigendian, use_temp_file=args.use_temp_file,
                                     eager=args.no_lazy,
                                     metadata_override=args.metadata, model_name=args.model_name,
                                     split_max_tensors=args.split_max_tensors,
                                     split_max_size=split_str_to_n_bytes(args.split_max_size), dry_run=args.dry_run,
                                     small_first_shard=args.no_tensor_first_split)

        if args.vocab_only:
            logger.info("Exporting model vocab...")
            model_instance.write_vocab()
            logger.info(f"Model vocab successfully exported to {model_instance.fname_out}")
        else:
            logger.info("Exporting model...")
            model_instance.write()
            out_path = f"{model_instance.fname_out.parent}{os.sep}" if is_split else model_instance.fname_out
            logger.info(f"Model successfully exported to {out_path}")


if __name__ == '__main__':
    main()