#!/usr/bin/env python3
# -*- coding: utf-8 -*-
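
"""Convert a Hugging Face model directory into a GGUF file for use with llama.cpp.

Typical invocation (run with --help for the authoritative list of flags):

    python convert_hf_to_gguf.py <model_dir> --outfile model.gguf --outtype f16
"""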

from __future__ import annotations

import logging
import argparse
import contextlib
import json
import os
import re
import sys
from enum import IntEnum
from pathlib import Path
from hashlib import sha256
from typing import TYPE_CHECKING, Any, Callable, ContextManager, Iterable, Iterator, Literal, Sequence, TypeVar, cast

import math
import numpy as np
import torch

if TYPE_CHECKING:
    from torch import Tensor

if 'NO_LOCAL_GGUF' not in os.environ:
    sys.path.insert(1, str(Path(__file__).parent / 'gguf-py'))
import gguf

logger = logging.getLogger("hf-to-gguf")


###### MODEL DEFINITIONS ######

class SentencePieceTokenTypes(IntEnum):
    NORMAL = 1
    UNKNOWN = 2
    CONTROL = 3
    USER_DEFINED = 4
    UNUSED = 5
    BYTE = 6


AnyModel = TypeVar("AnyModel", bound="type[Model]")


class Model:
    _model_classes: dict[str, type[Model]] = {}

    dir_model: Path
    ftype: gguf.LlamaFileType
    fname_out: Path
    is_big_endian: bool
    endianess: gguf.GGUFEndian
    use_temp_file: bool
    lazy: bool
    part_names: list[str]
    is_safetensors: bool
    hparams: dict[str, Any]
    block_count: int
    tensor_map: gguf.TensorNameMap
    tensor_names: set[str] | None
    gguf_writer: gguf.GGUFWriter
    model_name: str | None
    metadata_override: Path | None
    dir_model_card: Path

    # subclasses should define this!
    model_arch: gguf.MODEL_ARCH
    def __init__(self, dir_model: Path, ftype: gguf.LlamaFileType, fname_out: Path, is_big_endian: bool = False,
                 use_temp_file: bool = False, eager: bool = False,
                 metadata_override: Path | None = None, model_name: str | None = None,
                 split_max_tensors: int = 0, split_max_size: int = 0, dry_run: bool = False, small_first_shard: bool = False):
        if type(self) is Model:
            raise TypeError(f"{type(self).__name__!r} should not be directly instantiated")

        self.dir_model = dir_model
        self.ftype = ftype
        self.fname_out = fname_out
        self.is_big_endian = is_big_endian
        self.endianess = gguf.GGUFEndian.BIG if is_big_endian else gguf.GGUFEndian.LITTLE
        self.use_temp_file = use_temp_file
        self.lazy = not eager
        self.part_names = Model.get_model_part_names(self.dir_model, "model", ".safetensors")
        self.is_safetensors = len(self.part_names) > 0
        if not self.is_safetensors:
            self.part_names = Model.get_model_part_names(self.dir_model, "pytorch_model", ".bin")
        self.hparams = Model.load_hparams(self.dir_model)
        self.block_count = self.find_hparam(["n_layers", "num_hidden_layers", "n_layer", "num_layers"])
        self.tensor_map = gguf.get_tensor_name_map(self.model_arch, self.block_count)
        self.tensor_names = None
        self.metadata_override = metadata_override
        self.model_name = model_name
        self.dir_model_card = dir_model  # overridden in convert_lora_to_gguf.py

        # Apply heuristics to figure out typical tensor encoding based on first layer tensor encoding type
        if self.ftype == gguf.LlamaFileType.GUESSED:
            # NOTE: can't use field "torch_dtype" in config.json, because some finetunes lie.
            _, first_tensor = next(self.get_tensors())
            if first_tensor.dtype == torch.float16:
                logger.info(f"choosing --outtype f16 from first tensor type ({first_tensor.dtype})")
                self.ftype = gguf.LlamaFileType.MOSTLY_F16
            else:
                logger.info(f"choosing --outtype bf16 from first tensor type ({first_tensor.dtype})")
                self.ftype = gguf.LlamaFileType.MOSTLY_BF16

        # Configure GGUF Writer
        self.gguf_writer = gguf.GGUFWriter(path=None, arch=gguf.MODEL_ARCH_NAMES[self.model_arch], endianess=self.endianess, use_temp_file=self.use_temp_file,
                                           split_max_tensors=split_max_tensors, split_max_size=split_max_size, dry_run=dry_run, small_first_shard=small_first_shard)

    @classmethod
    def __init_subclass__(cls):
        # can't use an abstract property, because overriding it without type errors
        # would require using decorated functions instead of simply defining the property
        if "model_arch" not in cls.__dict__:
            raise TypeError(f"Missing property 'model_arch' for {cls.__name__!r}")

    def find_hparam(self, keys: Iterable[str], optional: bool = False) -> Any:
        key = next((k for k in keys if k in self.hparams), None)
        if key is not None:
            return self.hparams[key]
        if optional:
            return None
        raise KeyError(f"could not find any of: {keys}")

    def set_vocab(self):
        self._set_vocab_gpt2()

    def get_tensors(self) -> Iterator[tuple[str, Tensor]]:
        tensor_names_from_parts: set[str] = set()

        if len(self.part_names) > 1:
            self.tensor_names = set()
            index_name = "model.safetensors" if self.is_safetensors else "pytorch_model.bin"
            index_name += ".index.json"
            logger.info(f"gguf: loading model weight map from '{index_name}'")
            with open(self.dir_model / index_name, "r", encoding="utf-8") as f:
                index: dict[str, Any] = json.load(f)
                weight_map = index.get("weight_map")
                if weight_map is None or not isinstance(weight_map, dict):
                    raise ValueError(f"Can't load 'weight_map' from {index_name!r}")
                self.tensor_names.update(weight_map.keys())
        else:
            self.tensor_names = tensor_names_from_parts

        for part_name in self.part_names:
            logger.info(f"gguf: loading model part '{part_name}'")
            ctx: ContextManager[Any]
            if self.is_safetensors:
                from safetensors import safe_open
                ctx = cast(ContextManager[Any], safe_open(self.dir_model / part_name, framework="pt", device="cpu"))
            else:
                ctx = contextlib.nullcontext(torch.load(str(self.dir_model / part_name), map_location="cpu", mmap=True, weights_only=True))

            with ctx as model_part:
                tensor_names_from_parts.update(model_part.keys())

                for name in model_part.keys():
                    if self.is_safetensors:
                        if self.lazy:
                            data = model_part.get_slice(name)
                            data = LazyTorchTensor.from_safetensors_slice(data)
                        else:
                            data = model_part.get_tensor(name)
                    else:
                        data = model_part[name]
                        if self.lazy:
                            data = LazyTorchTensor.from_eager(data)
                    yield name, data

        # only verify tensor name presence; it doesn't matter if they are not in the right files
        if len(sym_diff := tensor_names_from_parts.symmetric_difference(self.tensor_names)) > 0:
            raise ValueError(f"Mismatch between weight map and model parts for tensor names: {sym_diff}")
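
    # Builds the canonical GGUF name for a tensor key; e.g. for a LLaMA-style arch,
    # format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, 0) yields "blk.0.attn_q.weight".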
    def format_tensor_name(self, key: gguf.MODEL_TENSOR, bid: int | None = None, suffix: str = ".weight") -> str:
        if key not in gguf.MODEL_TENSORS[self.model_arch]:
            raise ValueError(f"Missing {key!r} for MODEL_TENSORS of {self.model_arch!r}")
        name: str = gguf.TENSOR_NAMES[key]
        if "{bid}" in name:
            assert bid is not None
            name = name.format(bid=bid)
        return name + suffix

    def match_model_tensor_name(self, name: str, key: gguf.MODEL_TENSOR, bid: int | None, suffix: str = ".weight") -> bool:
        if key not in gguf.MODEL_TENSORS[self.model_arch]:
            return False
        key_name: str = gguf.TENSOR_NAMES[key]
        if "{bid}" in key_name:
            if bid is None:
                return False
            key_name = key_name.format(bid=bid)
        else:
            if bid is not None:
                return False
        return name == (key_name + suffix)

    def map_tensor_name(self, name: str, try_suffixes: Sequence[str] = (".weight", ".bias")) -> str:
        new_name = self.tensor_map.get_name(key=name, try_suffixes=try_suffixes)
        if new_name is None:
            raise ValueError(f"Can not map tensor {name!r}")
        return new_name

    def set_gguf_parameters(self):
        self.gguf_writer.add_block_count(self.block_count)

        if (n_ctx := self.find_hparam(["max_position_embeddings", "n_ctx"], optional=True)) is not None:
            self.gguf_writer.add_context_length(n_ctx)
            logger.info(f"gguf: context length = {n_ctx}")

        n_embd = self.find_hparam(["hidden_size", "n_embd"])
        self.gguf_writer.add_embedding_length(n_embd)
        logger.info(f"gguf: embedding length = {n_embd}")

        if (n_ff := self.find_hparam(["intermediate_size", "n_inner"], optional=True)) is not None:
            self.gguf_writer.add_feed_forward_length(n_ff)
            logger.info(f"gguf: feed forward length = {n_ff}")

        n_head = self.find_hparam(["num_attention_heads", "n_head"])
        self.gguf_writer.add_head_count(n_head)
        logger.info(f"gguf: head count = {n_head}")

        if (n_head_kv := self.hparams.get("num_key_value_heads")) is not None:
            self.gguf_writer.add_head_count_kv(n_head_kv)
            logger.info(f"gguf: key-value head count = {n_head_kv}")

        if (rope_theta := self.hparams.get("rope_theta")) is not None:
            self.gguf_writer.add_rope_freq_base(rope_theta)
            logger.info(f"gguf: rope theta = {rope_theta}")
        if (f_rms_eps := self.hparams.get("rms_norm_eps")) is not None:
            self.gguf_writer.add_layer_norm_rms_eps(f_rms_eps)
            logger.info(f"gguf: rms norm epsilon = {f_rms_eps}")
        if (f_norm_eps := self.find_hparam(["layer_norm_eps", "layer_norm_epsilon", "norm_epsilon"], optional=True)) is not None:
            self.gguf_writer.add_layer_norm_eps(f_norm_eps)
            logger.info(f"gguf: layer norm epsilon = {f_norm_eps}")
        if (n_experts := self.hparams.get("num_local_experts")) is not None:
            self.gguf_writer.add_expert_count(n_experts)
            logger.info(f"gguf: expert count = {n_experts}")
        if (n_experts_used := self.hparams.get("num_experts_per_tok")) is not None:
            self.gguf_writer.add_expert_used_count(n_experts_used)
            logger.info(f"gguf: experts used count = {n_experts_used}")

        if (head_dim := self.hparams.get("head_dim")) is not None:
            self.gguf_writer.add_key_length(head_dim)
            self.gguf_writer.add_value_length(head_dim)

        self.gguf_writer.add_file_type(self.ftype)
        logger.info(f"gguf: file type = {self.ftype}")
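
    # Subclasses override modify_tensors() to rename, split or merge tensors before
    # they are written. A minimal illustrative sketch (hypothetical subclass; the
    # tensor name and the split are assumptions, not taken from this file):
    #
    #   def modify_tensors(self, data_torch, name, bid):
    #       if name.endswith("qkv_proj.weight"):
    #           q, k, v = data_torch.chunk(3, dim=0)
    #           return [(self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid), q),
    #                   (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid), k),
    #                   (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid), v)]
    #       return [(self.map_tensor_name(name), data_torch)]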
    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        return [(self.map_tensor_name(name), data_torch)]

    def extra_f32_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool:
        del name, new_name, bid, n_dims  # unused

        return False

    def extra_f16_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool:
        del name, new_name, bid, n_dims  # unused

        return False

    def prepare_tensors(self):
        max_name_len = max(len(s) for _, s in self.tensor_map.mapping.values()) + len(".weight,")

        for name, data_torch in self.get_tensors():
            # we don't need these
            if name.endswith((".attention.masked_bias", ".attention.bias", ".rotary_emb.inv_freq")):
                continue

            old_dtype = data_torch.dtype

            # convert any unsupported data types to float32
            if data_torch.dtype not in (torch.float16, torch.float32):
                data_torch = data_torch.to(torch.float32)

            # use the first number-like part of the tensor name as the block id
            bid = None
            for part in name.split("."):
                if part.isdecimal():
                    bid = int(part)
                    break
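            # e.g. "model.layers.12.self_attn.q_proj.weight" -> bid = 12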

            for new_name, data in ((n, d.squeeze().numpy()) for n, d in self.modify_tensors(data_torch, name, bid)):
                data: np.ndarray  # type hint
                n_dims = len(data.shape)
                data_dtype = data.dtype
                data_qtype: gguf.GGMLQuantizationType | None = None

                # when both are True, f32 should win
                extra_f32 = self.extra_f32_tensors(name, new_name, bid, n_dims)
                extra_f16 = self.extra_f16_tensors(name, new_name, bid, n_dims)

                # Most of the codebase that takes in 1D tensors or norms only handles F32 tensors
                # Conditions should closely match those in llama_model_quantize_internal in llama.cpp
                extra_f32 = any(cond for cond in (
                    extra_f32,
                    n_dims == 1,
                    new_name.endswith("_norm.weight"),
                ))

                # Some tensor types are always in float32
                extra_f32 = extra_f32 or any(self.match_model_tensor_name(new_name, key, bid) for key in (
                    gguf.MODEL_TENSOR.FFN_GATE_INP,
                    gguf.MODEL_TENSOR.POS_EMBD,
                    gguf.MODEL_TENSOR.TOKEN_TYPES,
                ))

                # if f16 desired, convert any float32 2-dim weight tensors to float16
                extra_f16 = any(cond for cond in (
                    extra_f16,
                    (name.endswith(".weight") and n_dims >= 2),
                ))

                if self.ftype != gguf.LlamaFileType.ALL_F32 and extra_f16 and not extra_f32:
                    if self.ftype == gguf.LlamaFileType.MOSTLY_BF16:
                        data = gguf.quantize_bf16(data)
                        assert data.dtype == np.int16
                        data_qtype = gguf.GGMLQuantizationType.BF16

                    elif self.ftype == gguf.LlamaFileType.MOSTLY_Q8_0 and gguf.can_quantize_to_q8_0(data):
                        data = gguf.quantize_q8_0(data)
                        assert data.dtype == np.uint8
                        data_qtype = gguf.GGMLQuantizationType.Q8_0

                    else:  # default to float16 for quantized tensors
                        if data_dtype != np.float16:
                            data = data.astype(np.float16)
                        data_qtype = gguf.GGMLQuantizationType.F16

                if data_qtype is None:  # by default, convert to float32
                    if data_dtype != np.float32:
                        data = data.astype(np.float32)
                    data_qtype = gguf.GGMLQuantizationType.F32

                shape = gguf.quant_shape_from_byte_shape(data.shape, data_qtype) if data.dtype == np.uint8 else data.shape

                # reverse shape to make it similar to the internal ggml dimension order
                shape_str = f"{{{', '.join(str(n) for n in reversed(shape))}}}"

                # n_dims is implicit in the shape
                logger.info(f"{f'%-{max_name_len}s' % f'{new_name},'} {old_dtype} --> {data_qtype.name}, shape = {shape_str}")

                self.gguf_writer.add_tensor(new_name, data, raw_dtype=data_qtype)

    def set_type(self):
        self.gguf_writer.add_type(gguf.GGUFType.MODEL)

    def prepare_metadata(self, vocab_only: bool):
        total_params, shared_params, expert_params, expert_count = self.gguf_writer.get_total_parameter_count()

        self.metadata = gguf.Metadata.load(self.metadata_override, self.dir_model_card, self.model_name, total_params)

        # Fallback to model directory name if metadata name is still missing
        if self.metadata.name is None:
            self.metadata.name = self.dir_model.name

        # Generate parameter weight class (useful for leader boards) if not yet determined
        if self.metadata.size_label is None and total_params > 0:
            self.metadata.size_label = gguf.size_label(total_params, shared_params, expert_params, expert_count)

        # Extract the encoding scheme from the file type name. e.g. 'gguf.LlamaFileType.MOSTLY_Q8_0' --> 'Q8_0'
        output_type: str = self.ftype.name.partition("_")[2]

        # Filename Output
        if self.fname_out.is_dir():
            # Generate default filename based on model specification and available metadata
            if not vocab_only:
                fname_default: str = gguf.naming_convention(self.metadata.name, self.metadata.basename, self.metadata.finetune, self.metadata.version, self.metadata.size_label, output_type, model_type="LoRA" if total_params < 0 else None)
            else:
                fname_default: str = gguf.naming_convention(self.metadata.name, self.metadata.basename, self.metadata.finetune, self.metadata.version, size_label=None, output_type=None, model_type="vocab")

            # Use the default filename
            self.fname_out = self.fname_out / f"{fname_default}.gguf"
        else:
            # Output path is a custom defined templated filename
            # Note: `not is_dir()` is used because `.is_file()` will not detect
            #       file template strings as it doesn't actually exist as a file
            # Process templated file name with the output ftype, useful with the "auto" ftype
            self.fname_out = self.fname_out.parent / gguf.fill_templated_filename(self.fname_out.name, output_type)

        self.set_type()

        logger.info("Set meta model")
        self.metadata.set_gguf_meta_model(self.gguf_writer)

        logger.info("Set model parameters")
        self.set_gguf_parameters()

        logger.info("Set model tokenizer")
        self.set_vocab()

        logger.info("Set model quantization version")
        self.gguf_writer.add_quantization_version(gguf.GGML_QUANT_VERSION)

    def write(self):
        self.prepare_tensors()
        self.prepare_metadata(vocab_only=False)
        self.gguf_writer.write_header_to_file(path=self.fname_out)
        self.gguf_writer.write_kv_data_to_file()
        self.gguf_writer.write_tensors_to_file(progress=True)
        self.gguf_writer.close()

    def write_vocab(self):
        if len(self.gguf_writer.tensors) != 1:
            raise ValueError('Splitting the vocabulary is not supported')

        self.prepare_metadata(vocab_only=True)
        self.gguf_writer.write_header_to_file(path=self.fname_out)
        self.gguf_writer.write_kv_data_to_file()
        self.gguf_writer.close()

    @staticmethod
    def get_model_part_names(dir_model: Path, prefix: str, suffix: str) -> list[str]:
        part_names: list[str] = []
        for filename in os.listdir(dir_model):
            if filename.startswith(prefix) and filename.endswith(suffix):
                part_names.append(filename)

        part_names.sort()

        return part_names

    @staticmethod
    def load_hparams(dir_model: Path):
        with open(dir_model / "config.json", "r", encoding="utf-8") as f:
            return json.load(f)
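
    # register() maps HF `architectures` strings to Model subclasses, e.g. (as used
    # further down in this file):
    #
    #   @Model.register("GPTNeoXForCausalLM")
    #   class GPTNeoXModel(Model):
    #       model_arch = gguf.MODEL_ARCH.GPTNEOX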
    @classmethod
    def register(cls, *names: str) -> Callable[[AnyModel], AnyModel]:
        assert names

        def func(modelcls: AnyModel) -> AnyModel:
            for name in names:
                cls._model_classes[name] = modelcls
            return modelcls
        return func

    @classmethod
    def from_model_architecture(cls, arch: str) -> type[Model]:
        try:
            return cls._model_classes[arch]
        except KeyError:
            raise NotImplementedError(f'Architecture {arch!r} not supported!') from None

    def does_token_look_special(self, token: str | bytes) -> bool:
        if isinstance(token, (bytes, bytearray)):
            token_text = token.decode(encoding="utf-8")
        elif isinstance(token, memoryview):
            token_text = token.tobytes().decode(encoding="utf-8")
        else:
            token_text = token

        # Some models mark some added tokens which ought to be control tokens as not special.
        # (e.g. command-r, command-r-plus, deepseek-coder, gemma{,-2})
        seems_special = token_text in (
            "<pad>",  # deepseek-coder
            "<mask>", "<2mass>", "[@BOS@]",  # gemma{,-2}
        )

        seems_special = seems_special or (token_text.startswith("<|") and token_text.endswith("|>"))
        seems_special = seems_special or (token_text.startswith("<｜") and token_text.endswith("｜>"))  # deepseek-coder

        # TODO: should these be marked as UNUSED instead? (maybe not)
        seems_special = seems_special or (token_text.startswith("<unused") and token_text.endswith(">"))  # gemma{,-2}

        return seems_special

    # used for GPT-2 BPE and WordPiece vocabs
    def get_vocab_base(self) -> tuple[list[str], list[int], str]:
        tokens: list[str] = []
        toktypes: list[int] = []

        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(self.dir_model)
        vocab_size = self.hparams.get("vocab_size", len(tokenizer.vocab))
        assert max(tokenizer.vocab.values()) < vocab_size

        tokpre = self.get_vocab_base_pre(tokenizer)

        reverse_vocab = {id_: encoded_tok for encoded_tok, id_ in tokenizer.vocab.items()}
        added_vocab = tokenizer.get_added_vocab()

        for i in range(vocab_size):
            if i not in reverse_vocab:
                tokens.append(f"[PAD{i}]")
                toktypes.append(gguf.TokenType.UNUSED)
            else:
                token: str = reverse_vocab[i]
                if token in added_vocab:
                    if tokenizer.added_tokens_decoder[i].special or self.does_token_look_special(token):
                        toktypes.append(gguf.TokenType.CONTROL)
                    else:
                        token = token.replace(b"\xe2\x96\x81".decode("utf-8"), " ")  # pre-normalize user-defined spaces
                        toktypes.append(gguf.TokenType.USER_DEFINED)
                else:
                    toktypes.append(gguf.TokenType.NORMAL)
                tokens.append(token)

        return tokens, toktypes, tokpre

    # NOTE: this function is generated by convert_hf_to_gguf_update.py
    #       do not modify it manually!
    # ref:  https://github.com/ggerganov/llama.cpp/pull/6920
    # Marker: Start get_vocab_base_pre
    def get_vocab_base_pre(self, tokenizer) -> str:
        # encoding this string and hashing the resulting tokens would (hopefully) give us a unique identifier that
        # is specific for the BPE pre-tokenizer used by the model
        # we will use this unique identifier to write a "tokenizer.ggml.pre" entry in the GGUF file which we can
        # use in llama.cpp to implement the same pre-tokenizer

        chktxt = '\n \n\n \n\n\n \t \t\t \t\n  \n   \n    \n     \n🚀 (normal) 😶\u200d🌫️ (multiple emojis concatenated) ✅ 🦙🦙 3 33 333 3333 33333 333333 3333333 33333333 3.3 3..3 3...3 កាន់តែពិសេសអាច😁 ?我想在apple工作1314151天～ ------======= нещо на Български \'\'\'\'\'\'```````""""......!!!!!!?????? I\'ve been \'told he\'s there, \'RE you sure? \'M not sure I\'ll make it, \'D you like some tea? We\'Ve a\'lL'

        chktok = tokenizer.encode(chktxt)
        chkhsh = sha256(str(chktok).encode()).hexdigest()

        logger.debug(f"chktok: {chktok}")
        logger.debug(f"chkhsh: {chkhsh}")

        res = None

        # NOTE: if you get an error here, you need to update the convert_hf_to_gguf_update.py script
        #       or pull the latest version of the model from Huggingface
        #       don't edit the hashes manually!
        if chkhsh == "0ef9807a4087ebef797fc749390439009c3b9eda9ad1a097abbe738f486c01e5":
            # ref: https://huggingface.co/meta-llama/Meta-Llama-3-8B
            res = "llama-bpe"
        if chkhsh == "049ecf7629871e3041641907f3de7c733e4dbfdc736f57d882ba0b0845599754":
            # ref: https://huggingface.co/deepseek-ai/deepseek-llm-7b-base
            res = "deepseek-llm"
        if chkhsh == "347715f544604f9118bb75ed199f68779f423cabb20db6de6f31b908d04d7821":
            # ref: https://huggingface.co/deepseek-ai/deepseek-coder-6.7b-base
            res = "deepseek-coder"
        if chkhsh == "8aeee3860c56296a157a1fe2fad249ec40aa59b1bb5709f4ade11c4e6fe652ed":
            # ref: https://huggingface.co/tiiuae/falcon-7b
            res = "falcon"
        if chkhsh == "0876d13b50744004aa9aeae05e7b0647eac9d801b5ba4668afc01e709c15e19f":
            # ref: https://huggingface.co/BAAI/bge-small-en-v1.5
            res = "bert-bge"
        if chkhsh == "b6dc8df998e1cfbdc4eac8243701a65afe638679230920b50d6f17d81c098166":
            # ref: https://huggingface.co/mosaicml/mpt-7b
            res = "mpt"
        if chkhsh == "35d91631860c815f952d711435f48d356ebac988362536bed955d43bfa436e34":
            # ref: https://huggingface.co/bigcode/starcoder2-3b
            res = "starcoder"
        if chkhsh == "3ce83efda5659b07b1ad37ca97ca5797ea4285d9b9ab0dc679e4a720c9da7454":
            # ref: https://huggingface.co/openai-community/gpt2
            res = "gpt-2"
        if chkhsh == "32d85c31273f8019248f2559fed492d929ea28b17e51d81d3bb36fff23ca72b3":
            # ref: https://huggingface.co/stabilityai/stablelm-2-zephyr-1_6b
            res = "stablelm2"
        if chkhsh == "6221ad2852e85ce96f791f476e0b390cf9b474c9e3d1362f53a24a06dc8220ff":
            # ref: https://huggingface.co/smallcloudai/Refact-1_6-base
            res = "refact"
        if chkhsh == "9c2227e4dd922002fb81bde4fc02b0483ca4f12911410dee2255e4987644e3f8":
            # ref: https://huggingface.co/CohereForAI/c4ai-command-r-v01
            res = "command-r"
        if chkhsh == "e636dc30a262dcc0d8c323492e32ae2b70728f4df7dfe9737d9f920a282b8aea":
            # ref: https://huggingface.co/Qwen/Qwen1.5-7B
            res = "qwen2"
        if chkhsh == "b6dc8df998e1cfbdc4eac8243701a65afe638679230920b50d6f17d81c098166":
            # ref: https://huggingface.co/allenai/OLMo-1.7-7B-hf
            res = "olmo"
        if chkhsh == "a8594e3edff7c29c003940395316294b2c623e09894deebbc65f33f1515df79e":
            # ref: https://huggingface.co/databricks/dbrx-base
            res = "dbrx"
        if chkhsh == "0876d13b50744004aa9aeae05e7b0647eac9d801b5ba4668afc01e709c15e19f":
            # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-en
            res = "jina-v2-en"
        if chkhsh == "171aeeedd6fb548d418a7461d053f11b6f1f1fc9b387bd66640d28a4b9f5c643":
            # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-es
            res = "jina-v2-es"
        if chkhsh == "27949a2493fc4a9f53f5b9b029c82689cfbe5d3a1929bb25e043089e28466de6":
            # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-de
            res = "jina-v2-de"
        if chkhsh == "c136ed14d01c2745d4f60a9596ae66800e2b61fa45643e72436041855ad4089d":
            # ref: https://huggingface.co/abacusai/Smaug-Llama-3-70B-Instruct
            res = "smaug-bpe"
        if chkhsh == "c7ea5862a53e4272c035c8238367063e2b270d51faa48c0f09e9d5b54746c360":
            # ref: https://huggingface.co/LumiOpen/Poro-34B-chat
            res = "poro-chat"
        if chkhsh == "7967bfa498ade6b757b064f31e964dddbb80f8f9a4d68d4ba7998fcf281c531a":
            # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-code
            res = "jina-v2-code"
        if chkhsh == "b6e8e1518dc4305be2fe39c313ed643381c4da5db34a98f6a04c093f8afbe99b":
            # ref: https://huggingface.co/THUDM/glm-4-9b-chat
            res = "chatglm-bpe"
        if chkhsh == "7fc505bd3104ca1083b150b17d088b59534ede9bde81f0dd2090967d7fe52cee":
            # ref: https://huggingface.co/LumiOpen/Viking-7B
            res = "viking"
        if chkhsh == "b53802fb28e26d645c3a310b34bfe07da813026ec7c7716883404d5e0f8b1901":
            # ref: https://huggingface.co/core42/jais-13b
            res = "jais"
        if chkhsh == "63b97e4253352e6f357cc59ea5b583e3a680eaeaf2632188c2b952de2588485e":
            # ref: https://huggingface.co/mistralai/Mistral-Nemo-Base-2407
            res = "tekken"
        if chkhsh == "855059429035d75a914d1eda9f10a876752e281a054a7a3d421ef0533e5b6249":
            # ref: https://huggingface.co/HuggingFaceTB/SmolLM-135M
            res = "smollm"

        if res is None:
            logger.warning("\n")
            logger.warning("**************************************************************************************")
            logger.warning("** WARNING: The BPE pre-tokenizer was not recognized!")
            logger.warning("**          There are 2 possible reasons for this:")
            logger.warning("**          - the model has not been added to convert_hf_to_gguf_update.py yet")
            logger.warning("**          - the pre-tokenization config has changed upstream")
            logger.warning("**          Check your model files and convert_hf_to_gguf_update.py and update them accordingly.")
            logger.warning("** ref:     https://github.com/ggerganov/llama.cpp/pull/6920")
            logger.warning("**")
            logger.warning(f"** chkhsh:  {chkhsh}")
            logger.warning("**************************************************************************************")
            logger.warning("\n")
            raise NotImplementedError("BPE pre-tokenizer was not recognized - update get_vocab_base_pre()")

        logger.debug(f"tokenizer.ggml.pre: {repr(res)}")
        logger.debug(f"chkhsh: {chkhsh}")

        return res
    # Marker: End get_vocab_base_pre

    def _set_vocab_gpt2(self) -> None:
        tokens, toktypes, tokpre = self.get_vocab_base()
        self.gguf_writer.add_tokenizer_model("gpt2")
        self.gguf_writer.add_tokenizer_pre(tokpre)
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=True)
        special_vocab.add_to_gguf(self.gguf_writer)

    def _set_vocab_qwen(self):
        dir_model = self.dir_model
        hparams = self.hparams
        tokens: list[str] = []
        toktypes: list[int] = []

        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(dir_model, trust_remote_code=True)
        vocab_size = hparams["vocab_size"]
        assert max(tokenizer.get_vocab().values()) < vocab_size

        tokpre = self.get_vocab_base_pre(tokenizer)

        merges = []
        vocab = {}
        mergeable_ranks = tokenizer.mergeable_ranks
        for token, rank in mergeable_ranks.items():
            vocab[QwenModel.token_bytes_to_string(token)] = rank
            if len(token) == 1:
                continue
            merged = QwenModel.bpe(mergeable_ranks, token, max_rank=rank)
            assert len(merged) == 2
            merges.append(' '.join(map(QwenModel.token_bytes_to_string, merged)))

        # for this kind of tokenizer, added_vocab is not a subset of vocab, so they need to be combined
        added_vocab = tokenizer.special_tokens
        reverse_vocab = {id_ : encoded_tok for encoded_tok, id_ in {**vocab, **added_vocab}.items()}

        for i in range(vocab_size):
            if i not in reverse_vocab:
                tokens.append(f"[PAD{i}]")
                toktypes.append(gguf.TokenType.UNUSED)
            elif reverse_vocab[i] in added_vocab:
                tokens.append(reverse_vocab[i])
                toktypes.append(gguf.TokenType.CONTROL)
            else:
                tokens.append(reverse_vocab[i])
                toktypes.append(gguf.TokenType.NORMAL)

        self.gguf_writer.add_tokenizer_model("gpt2")
        self.gguf_writer.add_tokenizer_pre(tokpre)
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(dir_model, load_merges=False)
        special_vocab.merges = merges
        # only add special tokens when they were not already loaded from config.json
        if len(special_vocab.special_token_ids) == 0:
            special_vocab._set_special_token("bos", tokenizer.special_tokens["<|endoftext|>"])
            special_vocab._set_special_token("eos", tokenizer.special_tokens["<|endoftext|>"])
        # this one is usually not in config.json anyway
        special_vocab._set_special_token("unk", tokenizer.special_tokens["<|endoftext|>"])
        special_vocab.add_to_gguf(self.gguf_writer)

    def _set_vocab_sentencepiece(self, add_to_gguf=True):
        tokens, scores, toktypes = self._create_vocab_sentencepiece()

        self.gguf_writer.add_tokenizer_model("llama")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)
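
    # Returns three parallel lists: tokens[i], scores[i] and toktypes[i] all
    # describe token id i.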
    def _create_vocab_sentencepiece(self):
        from sentencepiece import SentencePieceProcessor

        tokenizer_path = self.dir_model / 'tokenizer.model'

        if not tokenizer_path.is_file():
            raise FileNotFoundError(f"File not found: {tokenizer_path}")

        tokenizer = SentencePieceProcessor()
        tokenizer.LoadFromFile(str(tokenizer_path))

        vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())

        tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
        scores: list[float] = [-10000.0] * vocab_size
        toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size

        for token_id in range(tokenizer.vocab_size()):
            piece = tokenizer.IdToPiece(token_id)
            text = piece.encode("utf-8")
            score = tokenizer.GetScore(token_id)

            toktype = SentencePieceTokenTypes.NORMAL
            if tokenizer.IsUnknown(token_id):
                toktype = SentencePieceTokenTypes.UNKNOWN
            elif tokenizer.IsControl(token_id):
                toktype = SentencePieceTokenTypes.CONTROL
            elif tokenizer.IsUnused(token_id):
                toktype = SentencePieceTokenTypes.UNUSED
            elif tokenizer.IsByte(token_id):
                toktype = SentencePieceTokenTypes.BYTE

            tokens[token_id] = text
            scores[token_id] = score
            toktypes[token_id] = toktype

        added_tokens_file = self.dir_model / 'added_tokens.json'
        if added_tokens_file.is_file():
            with open(added_tokens_file, "r", encoding="utf-8") as f:
                added_tokens_json = json.load(f)
                for key in added_tokens_json:
                    token_id = added_tokens_json[key]
                    if token_id >= vocab_size:
                        logger.warning(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
                        continue

                    tokens[token_id] = key.encode("utf-8")
                    scores[token_id] = -1000.0
                    toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED

        tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
        if tokenizer_config_file.is_file():
            with open(tokenizer_config_file, "r", encoding="utf-8") as f:
                tokenizer_config_json = json.load(f)
                added_tokens_decoder = tokenizer_config_json.get("added_tokens_decoder", {})
                for token_id, token_data in added_tokens_decoder.items():
                    token_id = int(token_id)
                    token: str = token_data["content"]
                    if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
                        if tokens[token_id] != token.encode("utf-8"):
                            logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token!r}')
                    if token_data.get("special") or self.does_token_look_special(token):
                        toktypes[token_id] = SentencePieceTokenTypes.CONTROL
                    else:
                        token = token.replace(b"\xe2\x96\x81".decode("utf-8"), " ")  # pre-normalize user-defined spaces
                        toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED

                    scores[token_id] = -1000.0
                    tokens[token_id] = token.encode("utf-8")

        if vocab_size > len(tokens):
            pad_count = vocab_size - len(tokens)
            logger.debug(f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]")
            for i in range(1, pad_count + 1):
                tokens.append(bytes(f"[PAD{i}]", encoding="utf-8"))
                scores.append(-1000.0)
                toktypes.append(SentencePieceTokenTypes.UNUSED)

        return tokens, scores, toktypes

    def _set_vocab_llama_hf(self):
        vocab = gguf.LlamaHfVocab(self.dir_model)

        tokens = []
        scores = []
        toktypes = []

        for text, score, toktype in vocab.all_tokens():
            tokens.append(text)
            scores.append(score)
            toktypes.append(toktype)

        assert len(tokens) == vocab.vocab_size

        self.gguf_writer.add_tokenizer_model("llama")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

    def _set_vocab_builtin(self, model_name: Literal["gpt-neox", "llama-spm"], vocab_size: int):
        tokenizer_path = Path(sys.path[0]) / "models" / f"ggml-vocab-{model_name}.gguf"
        logger.warning(f"Using tokenizer from '{os.path.relpath(tokenizer_path, os.getcwd())}'")
        vocab_reader = gguf.GGUFReader(tokenizer_path, "r")

        default_pre = "mpt" if model_name == "gpt-neox" else "default"

        field = vocab_reader.get_field(gguf.Keys.Tokenizer.MODEL)
        assert field  # tokenizer model
        self.gguf_writer.add_tokenizer_model(bytes(field.parts[-1]).decode("utf-8"))

        field = vocab_reader.get_field(gguf.Keys.Tokenizer.PRE)
        self.gguf_writer.add_tokenizer_pre(bytes(field.parts[-1]).decode("utf-8") if field else default_pre)

        field = vocab_reader.get_field(gguf.Keys.Tokenizer.LIST)
        assert field  # token list
        self.gguf_writer.add_token_list([bytes(field.parts[i]) for i in field.data][:vocab_size])

        if model_name == "llama-spm":
            field = vocab_reader.get_field(gguf.Keys.Tokenizer.SCORES)
            assert field  # token scores
            self.gguf_writer.add_token_scores([field.parts[i].tolist()[0] for i in field.data][:vocab_size])

        field = vocab_reader.get_field(gguf.Keys.Tokenizer.TOKEN_TYPE)
        assert field  # token types
        self.gguf_writer.add_token_types([field.parts[i].tolist()[0] for i in field.data][:vocab_size])

        if model_name != "llama-spm":
            field = vocab_reader.get_field(gguf.Keys.Tokenizer.MERGES)
            assert field  # token merges
            self.gguf_writer.add_token_merges([bytes(field.parts[i]) for i in field.data])

        if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.BOS_ID)) is not None:
            self.gguf_writer.add_bos_token_id(field.parts[-1].tolist()[0])
        if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.EOS_ID)) is not None:
            self.gguf_writer.add_eos_token_id(field.parts[-1].tolist()[0])
        if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.UNK_ID)) is not None:
            self.gguf_writer.add_unk_token_id(field.parts[-1].tolist()[0])
        if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.PAD_ID)) is not None:
            self.gguf_writer.add_pad_token_id(field.parts[-1].tolist()[0])
        if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.ADD_BOS)) is not None:
            self.gguf_writer.add_add_bos_token(field.parts[-1].tolist()[0])
        if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.ADD_EOS)) is not None:
            self.gguf_writer.add_add_eos_token(field.parts[-1].tolist()[0])


@Model.register("GPTNeoXForCausalLM")
class GPTNeoXModel(Model):
    model_arch = gguf.MODEL_ARCH.GPTNEOX

    def set_gguf_parameters(self):
        block_count = self.hparams["num_hidden_layers"]

        self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"])
        self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
        self.gguf_writer.add_rope_dimension_count(
            int(self.hparams["rotary_pct"] * (self.hparams["hidden_size"] // self.hparams["num_attention_heads"])),
        )
        self.gguf_writer.add_head_count(self.hparams["num_attention_heads"])
        self.gguf_writer.add_parallel_residual(self.hparams.get("use_parallel_residual", True))
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_eps"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads"))
        n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed"))

        tensors: list[tuple[str, Tensor]] = []

        if re.match(r"gpt_neox\.layers\.\d+\.attention\.query_key_value\.weight", name):
            # Map bloom-style qkv_linear to gpt-style qkv_linear
            # bloom: https://github.com/huggingface/transformers/blob/main/src/transformers/models/bloom/modeling_bloom.py#L238-L252  # noqa
            # gpt-2: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt2/modeling_gpt2.py#L312  # noqa
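            # the packed tensor is viewed as (n_head, 3, head_dim, n_embed); the
            # second axis separates the q, k and v projections, which are then
            # concatenated row-wise (all q rows, then all k rows, then all v rows)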
            qkv_weights = data_torch.reshape((n_head, 3, n_embed // n_head, n_embed))
            data_torch = torch.cat(
                (
                    qkv_weights[:, 0, :, :].reshape((-1, n_embed)),
                    qkv_weights[:, 1, :, :].reshape((-1, n_embed)),
                    qkv_weights[:, 2, :, :].reshape((-1, n_embed)),
                ),
                dim=0,
            )
            logger.info("re-format attention.linear_qkv.weight")
        elif re.match(r"gpt_neox\.layers\.\d+\.attention\.query_key_value\.bias", name):
            qkv_bias = data_torch.reshape((n_head, 3, n_embed // n_head))
            data_torch = torch.cat(
                (
                    qkv_bias[:, 0, :].reshape((n_embed,)),
                    qkv_bias[:, 1, :].reshape((n_embed,)),
                    qkv_bias[:, 2, :].reshape((n_embed,)),
                ),
                dim=0,
            )
            logger.info("re-format attention.linear_qkv.bias")

        tensors.append((self.map_tensor_name(name), data_torch))

        return tensors


@Model.register("BloomForCausalLM")
class BloomModel(Model):
    model_arch = gguf.MODEL_ARCH.BLOOM

    def set_gguf_parameters(self):
        n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed"))
        n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads"))
        self.gguf_writer.add_context_length(self.hparams.get("seq_length", n_embed))
        self.gguf_writer.add_embedding_length(n_embed)
        self.gguf_writer.add_feed_forward_length(4 * n_embed)
        self.gguf_writer.add_block_count(self.hparams["n_layer"])
        self.gguf_writer.add_head_count(n_head)
        self.gguf_writer.add_head_count_kv(n_head)
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads"))
        n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed"))

        name = re.sub(r'transformer\.', '', name)

        tensors: list[tuple[str, Tensor]] = []

        if re.match(r"h\.\d+\.self_attention\.query_key_value\.weight", name):
            # Map bloom-style qkv_linear to gpt-style qkv_linear
            # bloom: https://github.com/huggingface/transformers/blob/main/src/transformers/models/bloom/modeling_bloom.py#L238-L252  # noqa
            # gpt-2: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt2/modeling_gpt2.py#L312  # noqa
            qkv_weights = data_torch.reshape((n_head, 3, n_embed // n_head, n_embed))
            data_torch = torch.cat(
                (
                    qkv_weights[:, 0, :, :].reshape((-1, n_embed)),
                    qkv_weights[:, 1, :, :].reshape((-1, n_embed)),
                    qkv_weights[:, 2, :, :].reshape((-1, n_embed)),
                ),
                dim=0,
            )
            logger.info("re-format attention.linear_qkv.weight")
        elif re.match(r"h\.\d+\.self_attention\.query_key_value\.bias", name):
            qkv_bias = data_torch.reshape((n_head, 3, n_embed // n_head))
            data_torch = torch.cat(
                (
                    qkv_bias[:, 0, :].reshape((n_embed,)),
                    qkv_bias[:, 1, :].reshape((n_embed,)),
                    qkv_bias[:, 2, :].reshape((n_embed,)),
                ),
                dim=0,
            )
            logger.info("re-format attention.linear_qkv.bias")

        tensors.append((self.map_tensor_name(name), data_torch))

        if name == "word_embeddings.weight":
            assert self.tensor_names is not None

            # TODO: tie them at runtime, don't duplicate in the model file
            if all(s not in self.tensor_names for s in ("lm_head.weight", "output.weight")):
                tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT), data_torch))

        return tensors


@Model.register("MPTForCausalLM")
class MPTModel(Model):
    model_arch = gguf.MODEL_ARCH.MPT

    def set_vocab(self):
        try:
            self._set_vocab_gpt2()
        except Exception:
            # Fallback for SEA-LION model
            self._set_vocab_sentencepiece()
            self.gguf_writer.add_add_bos_token(False)
            self.gguf_writer.add_pad_token_id(3)
            self.gguf_writer.add_eos_token_id(1)
            self.gguf_writer.add_unk_token_id(0)

    def set_gguf_parameters(self):
        block_count = self.hparams["n_layers"]
        self.gguf_writer.add_context_length(self.hparams["max_seq_len"])
        self.gguf_writer.add_embedding_length(self.hparams["d_model"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_feed_forward_length(4 * self.hparams["d_model"])
        self.gguf_writer.add_head_count(self.hparams["n_heads"])
        if kv_n_heads := self.hparams["attn_config"].get("kv_n_heads"):
            self.gguf_writer.add_head_count_kv(kv_n_heads)
        self.gguf_writer.add_layer_norm_eps(1e-5)
        if self.hparams["attn_config"]["clip_qkv"] is not None:
            self.gguf_writer.add_clamp_kqv(self.hparams["attn_config"]["clip_qkv"])
        if self.hparams["attn_config"]["alibi"]:
            self.gguf_writer.add_max_alibi_bias(self.hparams["attn_config"]["alibi_bias_max"])
        else:
            self.gguf_writer.add_max_alibi_bias(0.0)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        if "scales" in name:
            new_name = self.map_tensor_name(name, try_suffixes=(".weight", ".bias", ".scales"))
            new_name = new_name.replace("scales", "act.scales")
        else:
            new_name = self.map_tensor_name(name, try_suffixes=(".weight", ".bias"))

        return [(new_name, data_torch)]


@Model.register("OrionForCausalLM")
class OrionModel(Model):
    model_arch = gguf.MODEL_ARCH.ORION

    def set_vocab(self):
        self._set_vocab_sentencepiece()

    def set_gguf_parameters(self):
        block_count = self.hparams["num_hidden_layers"]
        head_count = self.hparams["num_attention_heads"]
        head_count_kv = self.hparams.get("num_key_value_heads", head_count)

        ctx_length = 0
        if "max_sequence_length" in self.hparams:
            ctx_length = self.hparams["max_sequence_length"]
        elif "max_position_embeddings" in self.hparams:
            ctx_length = self.hparams["max_position_embeddings"]
        elif "model_max_length" in self.hparams:
            ctx_length = self.hparams["model_max_length"]
        else:
            raise ValueError("gguf: cannot find ctx length parameter.")

        self.gguf_writer.add_file_type(self.ftype)
        self.gguf_writer.add_tensor_data_layout("Meta AI original pth")
        self.gguf_writer.add_context_length(ctx_length)
        self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
        self.gguf_writer.add_head_count(head_count)
        self.gguf_writer.add_head_count_kv(head_count_kv)
        # note: config provides rms norm but it is actually layer norm
        # ref: https://huggingface.co/OrionStarAI/Orion-14B-Chat/blob/276a17221ce42beb45f66fac657a41540e71f4f5/modeling_orion.py#L570-L571
        self.gguf_writer.add_layer_norm_eps(self.hparams["rms_norm_eps"])


@Model.register("BaichuanForCausalLM", "BaiChuanForCausalLM")
class BaichuanModel(Model):
    model_arch = gguf.MODEL_ARCH.BAICHUAN

    def set_vocab(self):
        self._set_vocab_sentencepiece()

    def set_gguf_parameters(self):
        block_count = self.hparams["num_hidden_layers"]
        head_count = self.hparams["num_attention_heads"]
        head_count_kv = self.hparams.get("num_key_value_heads", head_count)

        ctx_length = 0
        if "max_sequence_length" in self.hparams:
            ctx_length = self.hparams["max_sequence_length"]
        elif "max_position_embeddings" in self.hparams:
            ctx_length = self.hparams["max_position_embeddings"]
        elif "model_max_length" in self.hparams:
            ctx_length = self.hparams["model_max_length"]
        else:
            raise ValueError("gguf: cannot find ctx length parameter.")

        self.gguf_writer.add_tensor_data_layout("Meta AI original pth")
        self.gguf_writer.add_context_length(ctx_length)
        self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
        self.gguf_writer.add_rope_dimension_count(self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
        self.gguf_writer.add_head_count(head_count)
        self.gguf_writer.add_head_count_kv(head_count_kv)
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
        self.gguf_writer.add_file_type(self.ftype)

        if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]:
            if self.hparams["rope_scaling"].get("type") == "linear":
                self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
                self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        head_count = self.hparams["num_attention_heads"]
        head_count_kv = self.hparams.get("num_key_value_heads", head_count)

        tensors: list[tuple[str, Tensor]] = []

        if bid is not None and name == f"model.layers.{bid}.self_attn.W_pack.weight":
            logger.info(f"Unpacking and permuting layer {bid}")
            tensors = [
                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid),
                    self._reverse_hf_permute_part(data_torch, 0, head_count, head_count)),
                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid),
                    self._reverse_hf_permute_part(data_torch, 1, head_count, head_count_kv)),
                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid),
                    self._reverse_hf_part(data_torch, 2)),
            ]
        else:
            tensors = [(self.map_tensor_name(name), data_torch)]

        return tensors

    def _reverse_hf_permute(self, weights: Tensor, n_head: int, n_kv_head: int | None = None) -> Tensor:
        if n_kv_head is not None and n_head != n_kv_head:
            n_head //= n_kv_head

        return (
            weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
            .swapaxes(1, 2)
            .reshape(weights.shape)
        )
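
    # Editor's note (not in the original source): the permute above undoes the
    # row reordering HF applies to q/k projections. HF's rotate_half RoPE
    # splits each head's dimensions into two halves, while llama.cpp rotates
    # interleaved pairs, so the rows are regrouped from half-split order back
    # into pair order.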
    def _reverse_hf_permute_part(
        self, weights: Tensor, n_part: int, n_head: int, n_head_kv: int | None = None,
    ) -> Tensor:
        r = weights.shape[0] // 3
        return self._reverse_hf_permute(weights[r * n_part:r * n_part + r, ...], n_head, n_head_kv)

    def _reverse_hf_part(self, weights: Tensor, n_part: int) -> Tensor:
        r = weights.shape[0] // 3
        return weights[r * n_part:r * n_part + r, ...]


@Model.register("XverseForCausalLM")
class XverseModel(Model):
    model_arch = gguf.MODEL_ARCH.XVERSE

    def set_vocab(self):
        assert (self.dir_model / "tokenizer.json").is_file()
        dir_model = self.dir_model
        hparams = self.hparams

        tokens: list[bytes] = []
        toktypes: list[int] = []

        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(dir_model)
        vocab_size = hparams.get("vocab_size", len(tokenizer.vocab))
        # Since we are checking the maximum index, we need to ensure it's strictly less than vocab_size,
        # because vocab_size is the count of items, and indexes start at 0.
        max_vocab_index = max(tokenizer.get_vocab().values())
        if max_vocab_index >= vocab_size:
            raise ValueError("Vocabulary size exceeds expected maximum size.")

        reverse_vocab: dict[int, str] = {id_: encoded_tok for encoded_tok, id_ in tokenizer.vocab.items()}
        added_vocab = tokenizer.get_added_vocab()

        for token_id in range(vocab_size):
            token_text = reverse_vocab[token_id].encode('utf-8')
            # replace "\x00" with a string of length > 0
            if token_text == b"\x00":
                toktype = gguf.TokenType.BYTE  # special
                token_text = f"<{token_text}>".encode('utf-8')
            elif re.fullmatch(br"<0x[0-9A-Fa-f]{2}>", token_text):
                toktype = gguf.TokenType.BYTE  # special
            elif reverse_vocab[token_id] in added_vocab:
                if tokenizer.added_tokens_decoder[token_id].special:
                    toktype = gguf.TokenType.CONTROL
                else:
                    toktype = gguf.TokenType.USER_DEFINED
            else:
                toktype = gguf.TokenType.NORMAL

            tokens.append(token_text)
            toktypes.append(toktype)

        self.gguf_writer.add_tokenizer_model("llama")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)
    def set_gguf_parameters(self):
        block_count = self.hparams["num_hidden_layers"]
        head_count = self.hparams["num_attention_heads"]
        head_count_kv = self.hparams.get("num_key_value_heads", head_count)

        ctx_length = 0
        if "max_sequence_length" in self.hparams:
            ctx_length = self.hparams["max_sequence_length"]
        elif "max_position_embeddings" in self.hparams:
            ctx_length = self.hparams["max_position_embeddings"]
        elif "model_max_length" in self.hparams:
            ctx_length = self.hparams["model_max_length"]
        else:
            raise ValueError("gguf: cannot find ctx length parameter.")

        self.gguf_writer.add_tensor_data_layout("Meta AI original pth")
        self.gguf_writer.add_context_length(ctx_length)
        self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
        self.gguf_writer.add_rope_dimension_count(self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
        self.gguf_writer.add_head_count(head_count)
        self.gguf_writer.add_head_count_kv(head_count_kv)
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
        self.gguf_writer.add_file_type(self.ftype)

        if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]:
            if self.hparams["rope_scaling"].get("type") == "linear":
                self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
                self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        head_count = self.hparams["num_attention_heads"]
        head_count_kv = self.hparams.get("num_key_value_heads", head_count)

        # HF models permute some of the tensors, so we need to undo that
        if name.endswith("q_proj.weight"):
            data_torch = self._reverse_hf_permute(data_torch, head_count, head_count)
        if name.endswith("k_proj.weight"):
            data_torch = self._reverse_hf_permute(data_torch, head_count, head_count_kv)

        return [(self.map_tensor_name(name), data_torch)]

    def _reverse_hf_permute(self, weights: Tensor, n_head: int, n_kv_head: int | None = None) -> Tensor:
        if n_kv_head is not None and n_head != n_kv_head:
            n_head //= n_kv_head

        return (
            weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
            .swapaxes(1, 2)
            .reshape(weights.shape)
        )


@Model.register("FalconForCausalLM", "RWForCausalLM")
class FalconModel(Model):
    model_arch = gguf.MODEL_ARCH.FALCON

    def set_gguf_parameters(self):
        block_count = self.hparams.get("num_hidden_layers")
        if block_count is None:
            block_count = self.hparams["n_layer"]  # old name

        n_head = self.hparams.get("num_attention_heads")
        if n_head is None:
            n_head = self.hparams["n_head"]  # old name

        n_head_kv = self.hparams.get("num_kv_heads")
        if n_head_kv is None:
            n_head_kv = self.hparams.get("n_head_kv", 1)  # old name

        self.gguf_writer.add_context_length(2048)  # not in config.json
        self.gguf_writer.add_tensor_data_layout("jploski")  # qkv tensor transform
        self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
        self.gguf_writer.add_feed_forward_length(4 * self.hparams["hidden_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_head_count(n_head)
        self.gguf_writer.add_head_count_kv(n_head_kv)
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)
    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        # QKV tensor transform
        # The original query_key_value tensor contains n_head_kv "kv groups",
        # each consisting of n_head/n_head_kv query weights followed by one key
        # and one value weight (shared by all query heads in the kv group).
        # This layout makes it a big pain to work with in GGML.
        # So we rearrange them here, so that we have n_head query weights
        # followed by n_head_kv key weights followed by n_head_kv value weights,
        # in contiguous fashion.
        # ref: https://github.com/jploski/ggml/blob/falcon40b/examples/falcon/convert-hf-to-ggml.py

        if "query_key_value" in name:
            n_head = self.find_hparam(["num_attention_heads", "n_head"])
            n_head_kv = self.find_hparam(["num_kv_heads", "n_head_kv"], optional=True) or 1
            head_dim = self.hparams["hidden_size"] // n_head

            qkv = data_torch.view(n_head_kv, n_head // n_head_kv + 2, head_dim, head_dim * n_head)
            q = qkv[:, :-2].reshape(n_head * head_dim, head_dim * n_head)
            k = qkv[:, [-2]].reshape(n_head_kv * head_dim, head_dim * n_head)
            v = qkv[:, [-1]].reshape(n_head_kv * head_dim, head_dim * n_head)
            data_torch = torch.cat((q, k, v)).reshape_as(data_torch)
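            # Editor's note with illustrative numbers (not from the original):
            # for n_head=8, n_head_kv=2, head_dim=64 the fused tensor views as
            # (2, 4 + 2, 64, 512) -- per kv group, four query-head blocks
            # followed by one key and one value block -- and q/k/v are then
            # re-concatenated contiguously before reshaping back.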

        return [(self.map_tensor_name(name), data_torch)]


@Model.register("GPTBigCodeForCausalLM")
class StarCoderModel(Model):
    model_arch = gguf.MODEL_ARCH.STARCODER

    def set_gguf_parameters(self):
        block_count = self.hparams["n_layer"]

        self.gguf_writer.add_context_length(self.hparams["n_positions"])
        self.gguf_writer.add_embedding_length(self.hparams["n_embd"])
        self.gguf_writer.add_feed_forward_length(4 * self.hparams["n_embd"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_head_count(self.hparams["n_head"])
        self.gguf_writer.add_head_count_kv(1)
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)


@Model.register("GPTRefactForCausalLM")
class RefactModel(Model):
    model_arch = gguf.MODEL_ARCH.REFACT

    def set_vocab(self):
        super().set_vocab()

        # TODO: how to determine special FIM tokens automatically?
        special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=False,
                                          special_token_types=['prefix', 'suffix', 'middle', 'eot'])
        special_vocab._set_special_token("prefix", 1)
        special_vocab._set_special_token("suffix", 3)
        special_vocab._set_special_token("middle", 2)
        special_vocab.chat_template = None  # do not add it twice
        special_vocab.add_to_gguf(self.gguf_writer)
    def set_gguf_parameters(self):
        hidden_dim = self.hparams["n_embd"]
        inner_dim = 4 * hidden_dim
        hidden_dim = int(2 * inner_dim / 3)
        multiple_of = 256
        ff_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of)
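        # Editor's note (illustrative arithmetic, not from the original): this
        # is the LLaMA-style SwiGLU sizing -- e.g. for n_embd = 2048,
        # inner_dim = 8192, hidden_dim = int(2 * 8192 / 3) = 5461, and ff_dim
        # rounds that up to the next multiple of 256, i.e. 5632.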

        block_count = self.hparams["n_layer"]

        # refact uses Alibi, so the context length from config.json reflects
        # the training setup rather than a hard limit
        self.gguf_writer.add_context_length(self.hparams["n_positions"])
        self.gguf_writer.add_embedding_length(self.hparams["n_embd"])
        self.gguf_writer.add_feed_forward_length(ff_dim)
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_head_count(self.hparams["n_head"])
        self.gguf_writer.add_head_count_kv(1)
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)
    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        hidden_dim = self.hparams["n_embd"]
        inner_dim = 4 * hidden_dim
        hidden_dim = int(2 * inner_dim / 3)
        multiple_of = 256
        ff_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of)
        n_head = self.hparams["n_head"]
        n_head_kv = 1
        head_dim = self.hparams["n_embd"] // n_head

        tensors: list[tuple[str, Tensor]] = []

        if bid is not None:
            if name == f"transformer.h.{bid}.attn.kv.weight":
                tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid), data_torch[:n_head_kv * head_dim]))
                tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid), data_torch[n_head_kv * head_dim:]))
            elif name == f"transformer.h.{bid}.attn.q.weight":
                tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid), data_torch))
            elif name == f"transformer.h.{bid}.mlp.gate_up_proj.weight":
                tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE, bid), data_torch[:ff_dim]))
                tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP, bid), data_torch[ff_dim:]))

        if len(tensors) == 0:
            tensors.append((self.map_tensor_name(name), data_torch))

        return tensors


@Model.register("StableLmForCausalLM", "StableLMEpochForCausalLM", "LlavaStableLMEpochForCausalLM")
class StableLMModel(Model):
    model_arch = gguf.MODEL_ARCH.STABLELM

    def set_vocab(self):
        if (self.dir_model / "tokenizer.json").is_file():
            self._set_vocab_gpt2()
        else:
            # StableLM 2 1.6B used to have a vocab in a similar format to Qwen's vocab
            self._set_vocab_qwen()

    def set_gguf_parameters(self):
        hparams = self.hparams
        block_count = hparams["num_hidden_layers"]

        self.gguf_writer.add_context_length(hparams["max_position_embeddings"])
        self.gguf_writer.add_embedding_length(hparams["hidden_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
        rotary_factor = self.find_hparam(["partial_rotary_factor", "rope_pct"])
        self.gguf_writer.add_rope_dimension_count(int(rotary_factor * (hparams["hidden_size"] // hparams["num_attention_heads"])))
        self.gguf_writer.add_head_count(hparams["num_attention_heads"])
        self.gguf_writer.add_head_count_kv(hparams["num_key_value_heads"])
        self.gguf_writer.add_parallel_residual(hparams["use_parallel_residual"] if "use_parallel_residual" in hparams else True)
        self.gguf_writer.add_layer_norm_eps(self.find_hparam(["layer_norm_eps", "norm_eps"]))
        self.gguf_writer.add_file_type(self.ftype)
    _q_norms: list[dict[str, Tensor]] | None = None
    _k_norms: list[dict[str, Tensor]] | None = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        n_head = self.hparams["num_attention_heads"]
        n_kv_head = self.hparams["num_key_value_heads"]

        if name.find("q_layernorm.norms") != -1:
            assert bid is not None

            if self._q_norms is None:
                self._q_norms = [{} for _ in range(self.block_count)]

            self._q_norms[bid][name] = data_torch

            if len(self._q_norms[bid]) >= n_head:
                return self._stack_qk_norm(bid, n_head, self._q_norms[bid], "q_layernorm")
            else:
                return []

        if name.find("k_layernorm.norms") != -1:
            assert bid is not None

            if self._k_norms is None:
                self._k_norms = [{} for _ in range(self.block_count)]

            self._k_norms[bid][name] = data_torch

            if len(self._k_norms[bid]) >= n_kv_head:
                return self._stack_qk_norm(bid, n_kv_head, self._k_norms[bid], "k_layernorm")
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]

    def _stack_qk_norm(self, bid: int, n_head: int, norms: dict[str, Tensor], layer_name: str = "q_layernorm"):
        datas: list[Tensor] = []
        # extract the norms in order
        for xid in range(n_head):
            ename = f"model.layers.{bid}.self_attn.{layer_name}.norms.{xid}.weight"
            datas.append(norms[ename])
            del norms[ename]
        data_torch = torch.stack(datas, dim=0)

        merged_name = f"model.layers.{bid}.self_attn.{layer_name}.weight"
        new_name = self.map_tensor_name(merged_name)

        return [(new_name, data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._q_norms is not None or self._k_norms is not None:
            # flatten two `list[dict[str, Tensor]]` into a single `list[str]`
            norms = (
                [k for d in self._q_norms for k in d.keys()] if self._q_norms is not None else []
            ) + (
                [k for d in self._k_norms for k in d.keys()] if self._k_norms is not None else []
            )
            if len(norms) > 0:
                raise ValueError(f"Unprocessed norms: {norms}")


@Model.register("LlamaForCausalLM", "MistralForCausalLM", "MixtralForCausalLM")
class LlamaModel(Model):
    model_arch = gguf.MODEL_ARCH.LLAMA

    def set_vocab(self):
        try:
            self._set_vocab_sentencepiece()
        except FileNotFoundError:
            try:
                self._set_vocab_llama_hf()
            except (FileNotFoundError, TypeError):
                # Llama 3
                self._set_vocab_gpt2()

        # Apply to CodeLlama only (and ignore for Llama 3 with a vocab size of 128256)
        if self.hparams.get("vocab_size", 32000) == 32016:
            special_vocab = gguf.SpecialVocab(
                self.dir_model, load_merges=False,
                special_token_types=['prefix', 'suffix', 'middle', 'eot']
            )
            special_vocab._set_special_token("prefix", 32007)
            special_vocab._set_special_token("suffix", 32008)
            special_vocab._set_special_token("middle", 32009)
            special_vocab._set_special_token("eot", 32010)
            special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        hparams = self.hparams
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])

        if "head_dim" in hparams:
            rope_dim = hparams["head_dim"]
        else:
            rope_dim = hparams["hidden_size"] // hparams["num_attention_heads"]
        self.gguf_writer.add_rope_dimension_count(rope_dim)

        if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]:
            if self.hparams["rope_scaling"].get("type") == "linear":
                self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
                self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"])

        tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
        if tokenizer_config_file.is_file():
            with open(tokenizer_config_file, "r", encoding="utf-8") as f:
                tokenizer_config_json = json.load(f)
                if "add_prefix_space" in tokenizer_config_json:
                    self.gguf_writer.add_add_space_prefix(tokenizer_config_json["add_prefix_space"])

        # Apply to granite small models only
        if self.hparams.get("vocab_size", 32000) == 49152:
            self.gguf_writer.add_add_bos_token(False)

    @staticmethod
    def permute(weights: Tensor, n_head: int, n_head_kv: int | None):
        if n_head_kv is not None and n_head != n_head_kv:
            n_head = n_head_kv
        return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
                .swapaxes(1, 2)
                .reshape(weights.shape))
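
    # Editor's note (illustrative, not from the original): for one head of
    # dim 4 with rows (r0, r1, r2, r3), permute returns (r0, r2, r1, r3) --
    # the two rotary halves are re-interleaved into adjacent pairs.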

    _experts: list[dict[str, Tensor]] | None = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        n_head = self.hparams["num_attention_heads"]
        n_kv_head = self.hparams.get("num_key_value_heads")

        if name.endswith(("q_proj.weight", "q_proj.bias")):
            data_torch = LlamaModel.permute(data_torch, n_head, n_head)
        if name.endswith(("k_proj.weight", "k_proj.bias")):
            data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)

        # process the experts separately
        if name.find("block_sparse_moe.experts") != -1:
            n_experts = self.hparams["num_local_experts"]

            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
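                # (editor's note, not in the original: each stacked projection
                # below has shape (n_expert, out_features, in_features), which
                # becomes one quantizable FFN_*_EXPS tensor on the GGUF side)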
                for wid in ["w1", "w2", "w3"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.{wid}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)

                    merged_name = f"layers.{bid}.feed_forward.experts.{wid}.weight"

                    new_name = self.map_tensor_name(merged_name)

                    tensors.append((new_name, data_torch))
                return tensors
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]

            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")


@Model.register("BitnetForCausalLM")
class BitnetModel(Model):
    model_arch = gguf.MODEL_ARCH.BITNET

    def set_vocab(self):
        self._set_vocab_sentencepiece()

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
        self.gguf_writer.add_rope_scaling_factor(1.0)

    def weight_quant(self, weight):
        dtype = weight.dtype
        weight = weight.float()
        s = 1 / weight.abs().mean().clamp(min=1e-5)
        weight = (weight * s).round().clamp(-1, 1) / s
        scale = weight.abs().max().unsqueeze(0)
        weight = torch.where(weight.abs().less(1e-6), 0, weight).type(dtype)
        weight = torch.sign(weight).type(dtype)
        return weight.type(dtype), scale.type(torch.float32)
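
    # Editor's note (not in the original source): this is the BitNet b1.58
    # ternarization -- weights are scaled by the inverse of their mean
    # absolute value, rounded into {-1, 0, 1}, and emitted as a sign tensor
    # plus one fp32 scale that the runtime multiplies back in at inference.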

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        new_name = self.map_tensor_name(name)

        if any(self.match_model_tensor_name(new_name, key, bid) for key in [
            gguf.MODEL_TENSOR.ATTN_Q,
            gguf.MODEL_TENSOR.ATTN_K,
            gguf.MODEL_TENSOR.ATTN_V,
            gguf.MODEL_TENSOR.ATTN_OUT,
            gguf.MODEL_TENSOR.FFN_UP,
            gguf.MODEL_TENSOR.FFN_DOWN,
            gguf.MODEL_TENSOR.FFN_GATE,
        ]):
            # transform weight into 1/0/-1 (in fp32)
            weight_torch, scale_torch = self.weight_quant(data_torch)
            yield (new_name, weight_torch)
            yield (new_name.removesuffix(".weight") + ".scale", scale_torch)
        else:
            yield (new_name, data_torch)


@Model.register("GrokForCausalLM")
class GrokModel(Model):
    model_arch = gguf.MODEL_ARCH.GROK

    def set_vocab(self):
        self._set_vocab_sentencepiece()

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def set_gguf_parameters(self):
        super().set_gguf_parameters()

    _experts: list[dict[str, Tensor]] | None = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # process the experts separately
        if name.find(".moe.") != -1:
            n_experts = self.hparams["num_local_experts"]

            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for wid in ["linear", "linear_1", "linear_v"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"transformer.decoder_layer.{bid}.moe.{xid}.{wid}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)

                    merged_name = f"transformer.decoder_layer.{bid}.moe.{wid}.weight"

                    new_name = self.map_tensor_name(merged_name)

                    tensors.append((new_name, data_torch))
                return tensors
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]


@Model.register("DbrxForCausalLM")
class DbrxModel(Model):
    model_arch = gguf.MODEL_ARCH.DBRX

    def set_gguf_parameters(self):
        ffn_config = self.hparams["ffn_config"]
        attn_config = self.hparams["attn_config"]
        self.gguf_writer.add_block_count(self.hparams["n_layers"])

        self.gguf_writer.add_context_length(self.hparams["max_seq_len"])
        self.gguf_writer.add_embedding_length(self.hparams["d_model"])
        self.gguf_writer.add_feed_forward_length(ffn_config["ffn_hidden_size"])

        self.gguf_writer.add_head_count(self.hparams["n_heads"])
        self.gguf_writer.add_head_count_kv(attn_config["kv_n_heads"])

        self.gguf_writer.add_rope_freq_base(attn_config["rope_theta"])

        self.gguf_writer.add_clamp_kqv(attn_config["clip_qkv"])

        self.gguf_writer.add_expert_count(ffn_config["moe_num_experts"])
        self.gguf_writer.add_expert_used_count(ffn_config["moe_top_k"])

        self.gguf_writer.add_layer_norm_eps(1e-5)

        self.gguf_writer.add_file_type(self.ftype)
        logger.info(f"gguf: file type = {self.ftype}")
    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        n_expert = self.hparams["ffn_config"]["moe_num_experts"]
        n_ff = self.hparams["ffn_config"]["ffn_hidden_size"]
        n_embd = self.hparams["d_model"]

        # Specific behavior for experts tensors: suffix .weight, view as 3D and transpose.
        # The original implementation expects (n_expert, n_ff, n_embd) for all experts weights,
        # but the llama.cpp moe graph works differently,
        # AND the dimensions in ggml are typically in the reverse order of the pytorch dimensions,
        # so (n_expert, n_ff, n_embd) in pytorch is {n_embd, n_ff, n_expert} in ggml_tensor
        exp_tensor_names = {"ffn.experts.mlp.w1": None,       # LLM_TENSOR_FFN_GATE_EXPS ggml_tensor->ne{n_embd, n_ff,   n_expert}
                            "ffn.experts.mlp.w2": (0, 2, 1),  # LLM_TENSOR_FFN_DOWN_EXPS ggml_tensor->ne{n_ff,   n_embd, n_expert}
                            "ffn.experts.mlp.v1": None}       # LLM_TENSOR_FFN_UP_EXPS   ggml_tensor->ne{n_embd, n_ff,   n_expert}
        experts = False

        for exp_tensor_name in exp_tensor_names.keys():
            if name.find(exp_tensor_name) != -1 and name.find(".weight") == -1:
                experts = True
                data_torch = data_torch.view(n_expert, n_ff, n_embd)
                if (permute_tensor := exp_tensor_names[exp_tensor_name]) is not None:
                    data_torch = data_torch.permute(*permute_tensor)
                break

        # map tensor names
        # In MoE models the ffn tensors are typically most of the model weights,
        # and need to be quantizable. Quantization expects tensor names to end
        # in .weight. Every other model follows that convention, but dbrx does
        # not, so we append the suffix here:
        # https://huggingface.co/databricks/dbrx-instruct/blob/main/model.safetensors.index.json#L15
        new_name = self.map_tensor_name(name if not experts else name + ".weight", try_suffixes=(".weight",))

        return [(new_name, data_torch)]

    def extra_f16_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool:
        del name, new_name, bid  # unused

        return n_dims > 1


@Model.register("MiniCPMForCausalLM")
class MiniCPMModel(Model):
    model_arch = gguf.MODEL_ARCH.MINICPM

    def set_gguf_parameters(self):
        block_count = self.hparams["num_hidden_layers"]
        self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"])
        self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
        self.gguf_writer.add_rope_dimension_count(self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
        self.gguf_writer.add_head_count(self.hparams["num_attention_heads"])
        self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"])
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
        self.gguf_writer.add_file_type(self.ftype)

    def set_vocab(self):
        self._set_vocab_llama_hf()

    def _reverse_hf_permute(self, weights: Tensor, n_head: int, n_kv_head: int | None = None) -> Tensor:
        if n_kv_head is not None and n_head != n_kv_head:
            n_head //= n_kv_head

        return (
            weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
            .swapaxes(1, 2)
            .reshape(weights.shape)
        )

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        n_head = self.hparams["num_attention_heads"]
        n_kv_head = self.hparams.get("num_key_value_heads")

        # HF models permute some of the tensors, so we need to undo that
        if name.endswith("q_proj.weight"):
            data_torch = self._reverse_hf_permute(data_torch, n_head, n_head)
        if name.endswith("k_proj.weight"):
            data_torch = self._reverse_hf_permute(data_torch, n_head, n_kv_head)

        return [(self.map_tensor_name(name), data_torch)]


@Model.register("QWenLMHeadModel")
class QwenModel(Model):
    model_arch = gguf.MODEL_ARCH.QWEN

    @staticmethod
    def token_bytes_to_string(b):
        from transformers.models.gpt2.tokenization_gpt2 import bytes_to_unicode
        byte_encoder = bytes_to_unicode()
        return ''.join([byte_encoder[ord(char)] for char in b.decode('latin-1')])

    @staticmethod
    def bpe(mergeable_ranks: dict[bytes, int], token: bytes, max_rank: int | None = None) -> list[bytes]:
        parts = [bytes([b]) for b in token]
        while True:
            min_idx = None
            min_rank = None
            for i, pair in enumerate(zip(parts[:-1], parts[1:])):
                rank = mergeable_ranks.get(pair[0] + pair[1])
                if rank is not None and (min_rank is None or rank < min_rank):
                    min_idx = i
                    min_rank = rank
            if min_rank is None or (max_rank is not None and min_rank >= max_rank):
                break
            assert min_idx is not None
            parts = parts[:min_idx] + [parts[min_idx] + parts[min_idx + 1]] + parts[min_idx + 2:]
        return parts
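
    # Editor's note (illustrative, not from the original): `bpe` replays
    # byte-pair merges in rank order. With ranks {b"ab": 0, b"abc": 1},
    # bpe(ranks, b"abc") merges "a"+"b" first (rank 0), then "ab"+"c"
    # (rank 1), returning [b"abc"]; passing max_rank=1 stops before the
    # second merge and returns [b"ab", b"c"].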

    def set_vocab(self):
        self._set_vocab_qwen()

    def set_gguf_parameters(self):
        self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"])
        self.gguf_writer.add_block_count(self.hparams["num_hidden_layers"])
        self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
        self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
        self.gguf_writer.add_rope_freq_base(self.hparams["rotary_emb_base"])
        self.gguf_writer.add_rope_dimension_count(self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
        self.gguf_writer.add_head_count(self.hparams["num_attention_heads"])
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)


@Model.register("Qwen2ForCausalLM")
class Qwen2Model(Model):
    model_arch = gguf.MODEL_ARCH.QWEN2

    def set_vocab(self):
        try:
            self._set_vocab_sentencepiece()
        except FileNotFoundError:
            self._set_vocab_gpt2()


@Model.register("Qwen2MoeForCausalLM")
class Qwen2MoeModel(Model):
    model_arch = gguf.MODEL_ARCH.QWEN2MOE

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        if (n_experts := self.hparams.get("num_experts")) is not None:
            self.gguf_writer.add_expert_count(n_experts)
        if (moe_intermediate_size := self.hparams.get("moe_intermediate_size")) is not None:
            self.gguf_writer.add_expert_feed_forward_length(moe_intermediate_size)
            logger.info(f"gguf: expert feed forward length = {moe_intermediate_size}")
        if (shared_expert_intermediate_size := self.hparams.get('shared_expert_intermediate_size')) is not None:
            self.gguf_writer.add_expert_shared_feed_forward_length(shared_expert_intermediate_size)
            logger.info(f"gguf: expert shared feed forward length = {shared_expert_intermediate_size}")

    _experts: list[dict[str, Tensor]] | None = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # process the experts separately
        if name.find("experts") != -1:
            n_experts = self.hparams["num_experts"]
            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for w_name in ["down_proj", "gate_proj", "up_proj"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)

                    merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"

                    new_name = self.map_tensor_name(merged_name)

                    tensors.append((new_name, data_torch))

                return tensors
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")


@Model.register("GPT2LMHeadModel")
class GPT2Model(Model):
    model_arch = gguf.MODEL_ARCH.GPT2

    def set_gguf_parameters(self):
        self.gguf_writer.add_block_count(self.hparams["n_layer"])
        self.gguf_writer.add_context_length(self.hparams["n_ctx"])
        self.gguf_writer.add_embedding_length(self.hparams["n_embd"])
        self.gguf_writer.add_feed_forward_length(4 * self.hparams["n_embd"])
        self.gguf_writer.add_head_count(self.hparams["n_head"])
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        tensors: list[tuple[str, Tensor]] = []

        # we don't need these
        if name.endswith((".attn.bias", ".attn.masked_bias")):
            return tensors

        if name.endswith((".c_attn.weight", ".c_proj.weight", ".c_fc.weight")):
            data_torch = data_torch.transpose(1, 0)
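            # (editor's note, not in the original: HF GPT-2 stores these
            # projections as Conv1D modules, whose weight is the transpose of
            # an nn.Linear weight, hence the transpose back here)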

        new_name = self.map_tensor_name(name)

        tensors.append((new_name, data_torch))

        # note: GPT2 output is tied to (same as) wte in original model
        if new_name == self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD):
            tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT), data_torch))

        return tensors


@Model.register("PhiForCausalLM")
class Phi2Model(Model):
    model_arch = gguf.MODEL_ARCH.PHI2

    def set_gguf_parameters(self):
        block_count = self.find_hparam(["num_hidden_layers", "n_layer"])

        rot_pct = self.find_hparam(["partial_rotary_factor"])
        n_embd = self.find_hparam(["hidden_size", "n_embd"])
        n_head = self.find_hparam(["num_attention_heads", "n_head"])

        self.gguf_writer.add_context_length(self.find_hparam(["n_positions", "max_position_embeddings"]))

        self.gguf_writer.add_embedding_length(n_embd)
        self.gguf_writer.add_feed_forward_length(4 * n_embd)
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_head_count(n_head)
        self.gguf_writer.add_head_count_kv(n_head)
        self.gguf_writer.add_layer_norm_eps(self.find_hparam(["layer_norm_epsilon", "layer_norm_eps"]))
        self.gguf_writer.add_rope_dimension_count(int(rot_pct * n_embd) // n_head)
        self.gguf_writer.add_file_type(self.ftype)
        self.gguf_writer.add_add_bos_token(False)


@Model.register("Phi3ForCausalLM")
class Phi3MiniModel(Model):
    model_arch = gguf.MODEL_ARCH.PHI3

    def set_vocab(self):
        from sentencepiece import SentencePieceProcessor

        tokenizer_path = self.dir_model / 'tokenizer.model'

        if not tokenizer_path.is_file():
            raise ValueError(f'Error: Missing {tokenizer_path}')

        tokenizer = SentencePieceProcessor()
        tokenizer.LoadFromFile(str(tokenizer_path))

        vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())

        tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
        scores: list[float] = [-10000.0] * vocab_size
        toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size

        for token_id in range(tokenizer.vocab_size()):
            piece = tokenizer.IdToPiece(token_id)
            text = piece.encode("utf-8")
            score = tokenizer.GetScore(token_id)

            toktype = SentencePieceTokenTypes.NORMAL
            if tokenizer.IsUnknown(token_id):
                toktype = SentencePieceTokenTypes.UNKNOWN
            elif tokenizer.IsControl(token_id):
                toktype = SentencePieceTokenTypes.CONTROL
            elif tokenizer.IsUnused(token_id):
                toktype = SentencePieceTokenTypes.UNUSED
            elif tokenizer.IsByte(token_id):
                toktype = SentencePieceTokenTypes.BYTE

            tokens[token_id] = text
            scores[token_id] = score
            toktypes[token_id] = toktype

        added_tokens_file = self.dir_model / 'added_tokens.json'
        if added_tokens_file.is_file():
            with open(added_tokens_file, "r", encoding="utf-8") as f:
                added_tokens_json = json.load(f)

                for key in added_tokens_json:
                    token_id = added_tokens_json[key]
                    if token_id >= vocab_size:
                        logger.debug(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
                        continue

                    tokens[token_id] = key.encode("utf-8")
                    scores[token_id] = -1000.0
                    toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED

        tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
        if tokenizer_config_file.is_file():
            with open(tokenizer_config_file, "r", encoding="utf-8") as f:
                tokenizer_config_json = json.load(f)
                added_tokens_decoder = tokenizer_config_json.get("added_tokens_decoder", {})
                for token_id, token_data in added_tokens_decoder.items():
                    token_id = int(token_id)
                    token = token_data["content"].encode("utf-8")
                    if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
                        if tokens[token_id] != token:
                            logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token.decode("utf-8")!r}')
                    tokens[token_id] = token
                    scores[token_id] = -1000.0
                    toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
                    if token_data.get("special"):
                        toktypes[token_id] = SentencePieceTokenTypes.CONTROL

        tokenizer_file = self.dir_model / 'tokenizer.json'
        if tokenizer_file.is_file():
            with open(tokenizer_file, "r", encoding="utf-8") as f:
                tokenizer_json = json.load(f)
                added_tokens = tokenizer_json.get("added_tokens", [])
                for token_data in added_tokens:
                    token_id = int(token_data["id"])
                    token = token_data["content"].encode("utf-8")
                    if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
                        if tokens[token_id] != token:
                            logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token.decode("utf-8")!r}')
                    tokens[token_id] = token
                    scores[token_id] = -1000.0
                    toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
                    if token_data.get("special"):
                        toktypes[token_id] = SentencePieceTokenTypes.CONTROL

        self.gguf_writer.add_tokenizer_model("llama")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        block_count = self.find_hparam(["num_hidden_layers", "n_layer"])

        n_embd = self.find_hparam(["hidden_size", "n_embd"])
        n_head = self.find_hparam(["num_attention_heads", "n_head"])
        n_head_kv = self.find_hparam(["num_key_value_heads", "n_head_kv"])
        rms_eps = self.find_hparam(["rms_norm_eps"])
        max_pos_embds = self.find_hparam(["n_positions", "max_position_embeddings"])
        orig_max_pos_embds = self.find_hparam(["original_max_position_embeddings"])
        rope_dims = n_embd // n_head

        self.gguf_writer.add_context_length(max_pos_embds)
        self.gguf_writer.add_rope_scaling_orig_ctx_len(orig_max_pos_embds)
        self.gguf_writer.add_embedding_length(n_embd)
        self.gguf_writer.add_feed_forward_length(self.find_hparam(["intermediate_size"]))
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_head_count(n_head)
        self.gguf_writer.add_head_count_kv(n_head_kv)
        self.gguf_writer.add_layer_norm_rms_eps(rms_eps)
        self.gguf_writer.add_rope_dimension_count(rope_dims)
        self.gguf_writer.add_rope_freq_base(self.find_hparam(["rope_theta"]))
        self.gguf_writer.add_file_type(self.ftype)

        # write rope scaling for long context (128k) model
        rope_scaling = self.find_hparam(['rope_scaling'], True)
        if rope_scaling is None:
            return

        scale = max_pos_embds / orig_max_pos_embds

        rope_scaling_type = rope_scaling.get('type', '').lower()
        if len(rope_scaling_type) == 0:
            raise KeyError('Missing the required key rope_scaling.type')

        if rope_scaling_type == 'su' or rope_scaling_type == 'longrope':
            attn_factor = math.sqrt(1 + math.log(scale) / math.log(orig_max_pos_embds)) if scale > 1.0 else 1.0
        elif rope_scaling_type == 'yarn':
            attn_factor = 0.1 * math.log(scale) + 1.0 if scale > 1.0 else 1.0
        else:
            raise NotImplementedError(f'The rope scaling type {rope_scaling_type} is not supported yet')

        self.gguf_writer.add_rope_scaling_attn_factors(attn_factor)
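
        # Editor's note (illustrative arithmetic, not from the original): for
        # a 4k -> 128k long-context model, scale = 131072 / 4096 = 32, so the
        # 'su'/'longrope' branch gives
        # attn_factor = sqrt(1 + log(32) / log(4096)) ~= 1.19.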

        long_factors = rope_scaling.get('long_factor', None)
        short_factors = rope_scaling.get('short_factor', None)

        if long_factors is None or short_factors is None:
            raise KeyError('Missing the required key rope_scaling.long_factor or rope_scaling.short_factor')

        if len(long_factors) != len(short_factors) or len(long_factors) != rope_dims / 2:
            raise ValueError(f'The length of rope long and short factors must be {rope_dims / 2}')

        self.gguf_writer.add_tensor(gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.ROPE_FACTORS_LONG] + ".weight", np.array(long_factors, dtype=np.float32))
        self.gguf_writer.add_tensor(gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.ROPE_FACTORS_SHORT] + ".weight", np.array(short_factors, dtype=np.float32))


@Model.register("PlamoForCausalLM")
class PlamoModel(Model):
    model_arch = gguf.MODEL_ARCH.PLAMO

    def set_vocab(self):
        self._set_vocab_sentencepiece()

    def set_gguf_parameters(self):
        hparams = self.hparams
        block_count = hparams["num_hidden_layers"]

        self.gguf_writer.add_context_length(4096)  # not in config.json
        self.gguf_writer.add_embedding_length(hparams["hidden_size"])
        self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_head_count(hparams["num_attention_heads"])
        self.gguf_writer.add_head_count_kv(5)  # hparams["num_key_value_heads"]) is wrong
        self.gguf_writer.add_layer_norm_rms_eps(hparams["rms_norm_eps"])
        self.gguf_writer.add_file_type(self.ftype)
    def shuffle_attn_q_weight(self, data_torch):
        assert data_torch.size() == (5120, 5120)
        data_torch = data_torch.reshape(8, 5, 128, 5120)
        data_torch = torch.permute(data_torch, (1, 0, 2, 3))
        data_torch = torch.reshape(data_torch, (5120, 5120))
        return data_torch

    def shuffle_attn_output_weight(self, data_torch):
        assert data_torch.size() == (5120, 5120)
        data_torch = data_torch.reshape(5120, 8, 5, 128)
        data_torch = torch.permute(data_torch, (0, 2, 1, 3))
        data_torch = torch.reshape(data_torch, (5120, 5120))
        return data_torch
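
    # Editor's note (an assumption, not from the original source): both
    # shuffles reorder the 8 x 5 grid of 128-dim head blocks into 5 x 8, so
    # that the query heads belonging to each of the 5 kv groups end up
    # contiguous -- the layout ggml's GQA broadcast in ggml_mul_mat expects.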

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        new_name = self.map_tensor_name(name)

        # shuffle for broadcasting of gqa in ggml_mul_mat
        if new_name.endswith("attn_q.weight"):
            data_torch = self.shuffle_attn_q_weight(data_torch)
        elif new_name.endswith("attn_output.weight"):
            data_torch = self.shuffle_attn_output_weight(data_torch)

        return [(new_name, data_torch)]


@Model.register("CodeShellForCausalLM")
class CodeShellModel(Model):
    model_arch = gguf.MODEL_ARCH.CODESHELL

    def set_gguf_parameters(self):
        block_count = self.hparams["n_layer"]

        self.gguf_writer.add_context_length(self.hparams["n_positions"])
        self.gguf_writer.add_embedding_length(self.hparams["n_embd"])
        self.gguf_writer.add_feed_forward_length(4 * self.hparams["n_embd"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_head_count(self.hparams["n_head"])
        self.gguf_writer.add_head_count_kv(self.hparams["num_query_groups"])
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)
        self.gguf_writer.add_rope_freq_base(10000.0)
        self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
        self.gguf_writer.add_rope_scaling_factor(1.0)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        new_name = self.map_tensor_name(name)

        tensors: list[tuple[str, Tensor]] = [(new_name, data_torch)]

        if new_name == self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD):
            assert self.tensor_names is not None

            if all(s not in self.tensor_names for s in ("lm_head.weight", "output.weight")):
                # copy tok_embd.weight to output.weight
                tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT), data_torch))

        return tensors


@Model.register("InternLM2ForCausalLM")
class InternLM2Model(Model):
    model_arch = gguf.MODEL_ARCH.INTERNLM2

    def set_vocab(self):
        # (TODO): Is there a better way?
        # Copied from _set_vocab_sentencepiece; the only difference is that we
        # treat the character \x00 specially, converting it into an emoji so it
        # is not mistakenly recognized as an empty string in C++.
        from sentencepiece import SentencePieceProcessor
        from sentencepiece import sentencepiece_model_pb2 as model

        tokenizer_path = self.dir_model / 'tokenizer.model'

        tokens: list[bytes] = []
        scores: list[float] = []
        toktypes: list[int] = []

        if not tokenizer_path.is_file():
            logger.error(f'Error: Missing {tokenizer_path}')
            sys.exit(1)

        sentencepiece_model = model.ModelProto()  # pyright: ignore[reportAttributeAccessIssue]
        sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read())
        add_prefix = sentencepiece_model.normalizer_spec.add_dummy_prefix

        tokenizer = SentencePieceProcessor()
        tokenizer.LoadFromFile(str(tokenizer_path))

        vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())

        for token_id in range(vocab_size):
            piece = tokenizer.IdToPiece(token_id)
            text = piece.encode("utf-8")
            score = tokenizer.GetScore(token_id)
            if text == b"\x00":
                # (TODO): fixme
                # Hack here and replace the \x00 characters.
                logger.warning(f"InternLM2: converting token '{text}' to '🐉'!")
                text = "🐉".encode("utf-8")

            toktype = SentencePieceTokenTypes.NORMAL
            if tokenizer.IsUnknown(token_id):
                toktype = SentencePieceTokenTypes.UNKNOWN
            elif tokenizer.IsControl(token_id):
                toktype = SentencePieceTokenTypes.CONTROL
            elif tokenizer.IsUnused(token_id):
                toktype = SentencePieceTokenTypes.UNUSED
            elif tokenizer.IsByte(token_id):
                toktype = SentencePieceTokenTypes.BYTE
            # take care of unused raw tokens
            if piece.startswith('[UNUSED'):
                toktype = SentencePieceTokenTypes.UNUSED

            tokens.append(text)
            scores.append(score)
            toktypes.append(toktype)

        added_tokens_file = self.dir_model / 'added_tokens.json'
        if added_tokens_file.is_file():
            with open(added_tokens_file, "r", encoding="utf-8") as f:
                added_tokens_json = json.load(f)

                for key in added_tokens_json:
                    tokens.append(key.encode("utf-8"))
                    scores.append(-1000.0)
                    toktypes.append(SentencePieceTokenTypes.USER_DEFINED)
        chat_eos_token = '<|im_end|>'
        chat_eos_token_id = None

        tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
        if tokenizer_config_file.is_file():
            with open(tokenizer_config_file, "r", encoding="utf-8") as f:
                tokenizer_config_json = json.load(f)
                added_tokens_decoder = tokenizer_config_json.get("added_tokens_decoder", {})
                for token_id, token_data in added_tokens_decoder.items():
                    token_id = int(token_id)
                    token = token_data["content"]
                    if token == chat_eos_token:
                        chat_eos_token_id = token_id
                    token = token.encode("utf-8")
                    if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
                        if tokens[token_id] != token:
                            logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token.decode("utf-8")!r}')
                    tokens[token_id] = token
                    scores[token_id] = -1000.0
                    toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
                    if token_data.get("special"):
                        toktypes[token_id] = SentencePieceTokenTypes.CONTROL

        tokenizer_file = self.dir_model / 'tokenizer.json'
        if tokenizer_file.is_file():
            with open(tokenizer_file, "r", encoding="utf-8") as f:
                tokenizer_json = json.load(f)
                added_tokens = tokenizer_json.get("added_tokens", [])
                for token_data in added_tokens:
                    token_id = int(token_data["id"])
                    token = token_data["content"]
                    if token == chat_eos_token:
                        chat_eos_token_id = token_id
                    token = token.encode("utf-8")
                    if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
                        if tokens[token_id] != token:
                            logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token.decode("utf-8")!r}')
                    tokens[token_id] = token
                    scores[token_id] = -1000.0
                    toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
                    if token_data.get("special"):
                        toktypes[token_id] = SentencePieceTokenTypes.CONTROL

        self.gguf_writer.add_tokenizer_model("llama")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)
        self.gguf_writer.add_add_space_prefix(add_prefix)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        old_eos = special_vocab.special_token_ids["eos"]
        if chat_eos_token_id is not None:
            # For the chat model, we replace the eos with '<|im_end|>'.
            # TODO: this is a hack, should be fixed
            # https://github.com/ggerganov/llama.cpp/pull/6745#issuecomment-2067687048
            special_vocab.special_token_ids["eos"] = chat_eos_token_id
            logger.warning(f"Replacing eos:{old_eos} with the special token:{chat_eos_token_id}"
                           " in chat mode so that the conversation can end normally.")

        special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"])
        self.gguf_writer.add_block_count(self.hparams["num_hidden_layers"])
        self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
        self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
        self.gguf_writer.add_rope_freq_base(self.hparams["rope_theta"])
        self.gguf_writer.add_head_count(self.hparams["num_attention_heads"])
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
        self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"])
        self.gguf_writer.add_file_type(self.ftype)

        if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]:
            if self.hparams["rope_scaling"].get("type") == "linear":
                self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
                self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        num_heads = self.hparams["num_attention_heads"]
        num_kv_heads = self.hparams["num_key_value_heads"]
        n_embd = self.hparams["hidden_size"]
        q_per_kv = num_heads // num_kv_heads
        head_dim = n_embd // num_heads
        num_groups = num_heads // q_per_kv
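
        # worked example (illustrative InternLM2-7B-style hparams: num_heads = 32,
        # num_kv_heads = 8, hidden_size = 4096): q_per_kv = 4, head_dim = 128 and
        # num_groups = 8, so the fused wqkv weight of shape (6144, 4096) is viewed
        # below as (8, 4 + 2, 128, 4096) and sliced into 4 query heads plus one
        # k head and one v head per group.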
        if bid is not None and f"model.layers.{bid}.attention.wqkv" in name:
            qkv = data_torch

            qkv = qkv.reshape((num_groups, q_per_kv + 2, head_dim, n_embd))
            q, k, v = qkv[:, : q_per_kv], qkv[:, -2], qkv[:, -1]

            # the model weights of q and k require an additional reshape + permute
            q = LlamaModel.permute(q.reshape((-1, q.shape[-1])), num_heads, num_heads)
            k = LlamaModel.permute(k.reshape((-1, k.shape[-1])), num_heads, num_kv_heads)
            v = v.reshape((-1, v.shape[-1]))

            return [
                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid), q),
                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid), k),
                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid), v),
            ]
        else:
            return [(self.map_tensor_name(name), data_torch)]


@Model.register("BertModel", "CamembertModel")
class BertModel(Model):
    model_arch = gguf.MODEL_ARCH.BERT

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.vocab_size = None

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_causal_attention(False)

        # get pooling path
        pooling_path = None
        module_path = self.dir_model / "modules.json"
        if module_path.is_file():
            with open(module_path, encoding="utf-8") as f:
                modules = json.load(f)
            for mod in modules:
                if mod["type"] == "sentence_transformers.models.Pooling":
                    pooling_path = mod["path"]
                    break

        # get pooling type
        if pooling_path is not None:
            with open(self.dir_model / pooling_path / "config.json", encoding="utf-8") as f:
                pooling = json.load(f)
            if pooling["pooling_mode_mean_tokens"]:
                pooling_type = gguf.PoolingType.MEAN
            elif pooling["pooling_mode_cls_token"]:
                pooling_type = gguf.PoolingType.CLS
            else:
                raise NotImplementedError("Only MEAN and CLS pooling types supported")
            self.gguf_writer.add_pooling_type(pooling_type)

    def set_vocab(self):
        tokens, toktypes, tokpre = self.get_vocab_base()
        self.vocab_size = len(tokens)

        # we need this to validate the size of the token_type embeddings
        # though currently we are passing all zeros to the token_type embeddings
        self.gguf_writer.add_token_type_count(2)  # "Sequence A" or "Sequence B"

        # convert to phantom space vocab
        def phantom(tok):
            if tok.startswith("[") and tok.endswith("]"):
                return tok
            if tok.startswith("##"):
                return tok[2:]
            return "\u2581" + tok
        tokens = list(map(phantom, tokens))
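        # e.g. (illustrative, not from a specific checkpoint): the WordPiece tokens
        # ["hello", "##ing", "[CLS]"] map to ["\u2581hello", "ing", "[CLS]"]:
        # word-initial pieces gain the phantom-space marker, continuation pieces
        # lose their "##" prefix, and bracketed specials pass through unchanged.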

        # add vocab to gguf
        self.gguf_writer.add_tokenizer_model("bert")
        self.gguf_writer.add_tokenizer_pre(tokpre)
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)

        # handle special tokens
        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        # we are only using BERT for embeddings so we don't need the pooling layer
        if name in ("embeddings.position_ids", "pooler.dense.weight", "pooler.dense.bias"):
            return []  # we don't need these

        return [(self.map_tensor_name(name), data_torch)]


@Model.register("NomicBertModel")
class NomicBertModel(BertModel):
    model_arch = gguf.MODEL_ARCH.NOMIC_BERT

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # the HF config claims n_ctx=8192, but it uses RoPE scaling
        self.hparams["n_ctx"] = 2048

        # SwiGLU activation
        assert self.hparams["activation_function"] == "swiglu"
        # this doesn't do anything in the HF version
        assert self.hparams["causal"] is False
        # no bias tensors
        assert self.hparams["qkv_proj_bias"] is False
        assert self.hparams["mlp_fc1_bias"] is False
        assert self.hparams["mlp_fc2_bias"] is False
        # norm at end of layer
        assert self.hparams["prenorm"] is False
        # standard RoPE
        assert self.hparams["rotary_emb_fraction"] == 1.0
        assert self.hparams["rotary_emb_interleaved"] is False
        assert self.hparams["rotary_emb_scale_base"] is None

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_rope_freq_base(self.hparams["rotary_emb_base"])


@Model.register("GemmaForCausalLM")
class GemmaModel(Model):
    model_arch = gguf.MODEL_ARCH.GEMMA

    def set_vocab(self):
        self._set_vocab_sentencepiece()

        # TODO: these special tokens should be exported only for the CodeGemma family
        special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=False,
                                          special_token_types = ['prefix', 'suffix', 'middle', 'fsep', 'eot'])
        special_vocab._set_special_token("prefix", 67)
        special_vocab._set_special_token("suffix", 69)
        special_vocab._set_special_token("middle", 68)
        special_vocab._set_special_token("fsep",   70)
        special_vocab._set_special_token("eot",    107)
        special_vocab.chat_template = None  # do not add it twice
        special_vocab.add_to_gguf(self.gguf_writer)

        self.gguf_writer.add_add_space_prefix(False)

    def set_gguf_parameters(self):
        hparams = self.hparams
        block_count = hparams["num_hidden_layers"]

        self.gguf_writer.add_context_length(hparams["max_position_embeddings"])
        self.gguf_writer.add_embedding_length(hparams["hidden_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
        self.gguf_writer.add_head_count(hparams["num_attention_heads"])
        self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"] if "num_key_value_heads" in hparams else hparams["num_attention_heads"])
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
        self.gguf_writer.add_key_length(hparams["head_dim"])
        self.gguf_writer.add_value_length(hparams["head_dim"])
        self.gguf_writer.add_file_type(self.ftype)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        # lm_head is not used in llama.cpp, while AutoAWQ will include this tensor in the model.
        # To prevent errors, skip loading lm_head.weight.
        if name == "lm_head.weight":
            logger.debug(f"Skipping get tensor {name!r} in safetensors so that convert can end normally.")
            return []

        # ref: https://github.com/huggingface/transformers/blob/fc37f38915372c15992b540dfcbbe00a916d4fc6/src/transformers/models/gemma/modeling_gemma.py#L89
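        # Gemma's reference RMSNorm computes x * (1 + weight), i.e. the checkpoint
        # stores the norm weight offset by -1; adding 1 here bakes the offset in
        # so the runtime can apply a plain x * weight.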
        if name.endswith("norm.weight"):
            data_torch = data_torch + 1

        return [(self.map_tensor_name(name), data_torch)]


@Model.register("Gemma2ForCausalLM")
class Gemma2Model(Model):
    model_arch = gguf.MODEL_ARCH.GEMMA2

    def set_vocab(self):
        self._set_vocab_sentencepiece()

        self.gguf_writer.add_add_space_prefix(False)

    def set_gguf_parameters(self):
        hparams = self.hparams
        block_count = hparams["num_hidden_layers"]

        self.gguf_writer.add_context_length(hparams["max_position_embeddings"])
        self.gguf_writer.add_embedding_length(hparams["hidden_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
        self.gguf_writer.add_head_count(hparams["num_attention_heads"])
        self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"] if "num_key_value_heads" in hparams else hparams["num_attention_heads"])
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
        self.gguf_writer.add_key_length(hparams["head_dim"])
        self.gguf_writer.add_value_length(hparams["head_dim"])
        self.gguf_writer.add_file_type(self.ftype)
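
        # Gemma 2 soft-caps both the attention logits and the final logits,
        # squashing x into (-cap, cap) via cap * tanh(x / cap); record both
        # caps so inference can reproduce it.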
        self.gguf_writer.add_attn_logit_softcapping(
            self.hparams["attn_logit_softcapping"]
        )
        self.gguf_writer.add_final_logit_softcapping(
            self.hparams["final_logit_softcapping"]
        )
        self.gguf_writer.add_sliding_window(self.hparams["sliding_window"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        # lm_head is not used in llama.cpp, while AutoAWQ will include this tensor in the model.
        # To prevent errors, skip loading lm_head.weight.
        if name == "lm_head.weight":
            logger.debug(f"Skipping get tensor {name!r} in safetensors so that convert can end normally.")
            return []

        # ref: https://github.com/huggingface/transformers/blob/fc37f38915372c15992b540dfcbbe00a916d4fc6/src/transformers/models/gemma/modeling_gemma.py#L89
        if name.endswith("norm.weight"):
            data_torch = data_torch + 1

        return [(self.map_tensor_name(name), data_torch)]


@Model.register("Starcoder2ForCausalLM")
class StarCoder2Model(Model):
    model_arch = gguf.MODEL_ARCH.STARCODER2


@Model.register("MambaForCausalLM", "MambaLMHeadModel")
class MambaModel(Model):
    model_arch = gguf.MODEL_ARCH.MAMBA

    def set_vocab(self):
        vocab_size = self.hparams["vocab_size"]
        # Round vocab size to next multiple of 8
        pad_vocab = self.hparams.get("pad_vocab_size_multiple", 8)
        # pad using ceiling division
        # ref: https://stackoverflow.com/a/17511341/22827863
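        # e.g. (assuming the stock GPT-NeoX vocab used by the original Mamba
        # models) vocab_size = 50277 rounds up to 50280: -(50277 // -8) * 8 == 50280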
        vocab_size = -(vocab_size // -pad_vocab) * pad_vocab
        self.hparams["vocab_size"] = vocab_size

        if (self.dir_model / "tokenizer.json").is_file():
            self._set_vocab_gpt2()
        elif (self.dir_model / "tokenizer.model").is_file():
            self._set_vocab_sentencepiece()
        else:
            # Use the GPT-NeoX tokenizer when no tokenizer files are present
            self._set_vocab_builtin("gpt-neox", vocab_size)

    def set_gguf_parameters(self):
        d_model = self.find_hparam(["hidden_size",       "d_model"])
        d_conv  = self.find_hparam(["conv_kernel",       "d_conv"],  optional=True) or 4
        d_inner = self.find_hparam(["intermediate_size", "d_inner"], optional=True) or 2 * d_model
        d_state = self.find_hparam(["state_size",        "d_state"], optional=True) or 16
        # ceiling division
        # ref: https://stackoverflow.com/a/17511341/22827863
        # ref: https://github.com/state-spaces/mamba/blob/ce59daea3a090d011d6476c6e5b97f6d58ddad8b/mamba_ssm/modules/mamba_simple.py#L58
        dt_rank      = self.find_hparam(["time_step_rank",     "dt_rank"],      optional=True) or -(d_model // -16)
        rms_norm_eps = self.find_hparam(["layer_norm_epsilon", "rms_norm_eps"], optional=True) or 1e-5

        # Fail early for models which don't have a block expansion factor of 2
        assert d_inner == 2 * d_model

        self.gguf_writer.add_context_length(2**20)  # arbitrary value; for those who use the default
        self.gguf_writer.add_embedding_length(d_model)
        self.gguf_writer.add_feed_forward_length(0)  # unused, but seemingly required when loading
        self.gguf_writer.add_head_count(0)  # unused, but seemingly required when loading
        self.gguf_writer.add_block_count(self.hparams["n_layer"])
        self.gguf_writer.add_ssm_conv_kernel(d_conv)
        self.gguf_writer.add_ssm_inner_size(d_inner)
        self.gguf_writer.add_ssm_state_size(d_state)
        self.gguf_writer.add_ssm_time_step_rank(dt_rank)
        self.gguf_writer.add_layer_norm_rms_eps(rms_norm_eps)
        self.gguf_writer.add_file_type(self.ftype)

    _tok_embd = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        output_name = self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT)
        tok_embd_name = self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD)

        new_name = self.map_tensor_name(name)

        if name.endswith(".A_log"):
            logger.debug("A_log --> A ==> " + new_name)
            data_torch = -torch.exp(data_torch)
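            # Mamba parameterizes the state matrix through A_log so that
            # A = -exp(A_log) stays negative-real; store the materialized A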

        # assuming token_embd.weight is seen before output.weight
        if self._tok_embd is not None and new_name == output_name:
            if torch.equal(self._tok_embd, data_torch):
                logger.debug(f"{output_name} is equivalent to {tok_embd_name}, omitting")
                return []
        elif new_name == tok_embd_name:
            self._tok_embd = data_torch

        return [(new_name, data_torch)]

    def extra_f32_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool:
        del n_dims  # unused

        return bid is not None and new_name in (
            self.format_tensor_name(n, bid, ".weight" if name.endswith(".weight") else "") for n in [
                gguf.MODEL_TENSOR.SSM_CONV1D,
                gguf.MODEL_TENSOR.SSM_X,
                gguf.MODEL_TENSOR.SSM_DT,
                gguf.MODEL_TENSOR.SSM_A,
                gguf.MODEL_TENSOR.SSM_D,
            ]
        )


@Model.register("CohereForCausalLM")
class CommandR2Model(Model):
    model_arch = gguf.MODEL_ARCH.COMMAND_R

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # max_position_embeddings = 8192 in config.json but model was actually
        # trained on 128k context length
        # aya-23 models don't have model_max_length specified
        self.hparams["max_position_embeddings"] = self.find_hparam(["model_max_length", "max_position_embeddings"])

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_logit_scale(self.hparams["logit_scale"])
        self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)


@Model.register("OlmoForCausalLM")
@Model.register("OLMoForCausalLM")
class OlmoModel(Model):
    model_arch = gguf.MODEL_ARCH.OLMO

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_layer_norm_eps(1e-5)
        clip_qkv = self.hparams.get("clip_qkv")
        if clip_qkv is not None:
            self.gguf_writer.add_clamp_kqv(clip_qkv)

    # Same as super class, but permuting q_proj, k_proj
    # Copied from: LlamaModel
    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        n_head = self.hparams["num_attention_heads"]
        n_kv_head = self.hparams.get("num_key_value_heads")

        if name.endswith("q_proj.weight"):
            data_torch = LlamaModel.permute(data_torch, n_head, n_head)
        if name.endswith("k_proj.weight"):
            data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)

        return [(self.map_tensor_name(name), data_torch)]


@Model.register("JinaBertModel", "JinaBertForMaskedLM")
class JinaBertV2Model(BertModel):
    model_arch = gguf.MODEL_ARCH.JINA_BERT_V2

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.intermediate_size = self.hparams["intermediate_size"]

    def get_tensors(self):
        for name, data in super().get_tensors():
            if 'gated_layer' in name:
                d1 = data[:self.intermediate_size, :]
                name1 = name.replace('gated_layers', 'gated_layers_w')
                name1 = name1.replace('up_gated_layer', 'gated_layers_v')
                d2 = data[self.intermediate_size:, :]
                name2 = name.replace('gated_layers', 'gated_layers_v')
                name2 = name2.replace('up_gated_layer', 'gated_layers_w')
                yield name1, d1
                yield name2, d2
                continue

            yield name, data

    def set_vocab(self):
        tokenizer_class = 'BertTokenizer'
        with open(self.dir_model / "tokenizer_config.json", "r", encoding="utf-8") as f:
            tokenizer_class = json.load(f)['tokenizer_class']

        if tokenizer_class == 'BertTokenizer':
            super().set_vocab()
        elif tokenizer_class == 'RobertaTokenizer':
            self._set_vocab_gpt2()
            self.gguf_writer.add_token_type_count(2)
        else:
            raise NotImplementedError(f'Tokenizer {tokenizer_class} is not supported for JinaBertModel')
        self.gguf_writer.add_add_bos_token(True)
        self.gguf_writer.add_add_eos_token(True)


@Model.register("OpenELMForCausalLM")
class OpenELMModel(Model):
    model_arch = gguf.MODEL_ARCH.OPENELM

    @staticmethod
    def _make_divisible(v: float | int, divisor: int) -> int:
        # ref: https://huggingface.co/apple/OpenELM-270M-Instruct/blob/eb111ff2e6724348e5b905984063d4064d4bc579/configuration_openelm.py#L34-L38
        new_v = max(divisor, int(v + divisor / 2) // divisor * divisor)
        # Make sure that rounding down does not go down by more than 10%.
        if new_v < 0.9 * v:
            new_v += divisor
        return new_v
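
    # e.g. _make_divisible(100, 8) == 104 (round-half-up to a multiple of 8),
    # while _make_divisible(10, 8) == 16: rounding to 8 would undershoot the
    # requested 10 by more than 10%, so one extra divisor is added back.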

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        ffn_multipliers: list[float] = self.hparams["ffn_multipliers"]
        ffn_dim_divisor: int = self.hparams["ffn_dim_divisor"]
        self._n_embd: int = self.hparams["model_dim"]
        self._num_kv_heads: list[int] = self.hparams["num_kv_heads"]
        self._num_query_heads: list[int] = self.hparams["num_query_heads"]
        self._ffn_dims: list[int] = [
            OpenELMModel._make_divisible(multiplier * self._n_embd, ffn_dim_divisor)
            for multiplier in ffn_multipliers
        ]
        assert isinstance(self._num_kv_heads, list) and isinstance(self._num_kv_heads[0], int)
        assert isinstance(self._num_query_heads, list) and isinstance(self._num_query_heads[0], int)

    # Uses the tokenizer from meta-llama/Llama-2-7b-hf
    def set_vocab(self):
        try:
            self._set_vocab_sentencepiece()
        except FileNotFoundError:
            self._set_vocab_builtin("llama-spm", self.hparams["vocab_size"])

    def set_gguf_parameters(self):
        n_embd = self._n_embd
        head_dim = self.hparams["head_dim"]
        rot_pct = 1.0
        assert self.block_count == len(self._num_kv_heads)
        assert self.block_count == len(self._num_query_heads)
        assert self.block_count == len(self._ffn_dims)

        self.gguf_writer.add_block_count(self.block_count)
        self.gguf_writer.add_context_length(self.hparams["max_context_length"])
        self.gguf_writer.add_embedding_length(n_embd)
        self.gguf_writer.add_feed_forward_length(self._ffn_dims)
        self.gguf_writer.add_head_count(self._num_query_heads)
        self.gguf_writer.add_head_count_kv(self._num_kv_heads)
        self.gguf_writer.add_rope_freq_base(self.hparams["rope_freq_constant"])
        # https://huggingface.co/apple/OpenELM-270M-Instruct/blob/c401df2/modeling_openelm.py#L30
        self.gguf_writer.add_layer_norm_rms_eps(1e-6)
        self.gguf_writer.add_rope_dimension_count(int(rot_pct * head_dim))
        self.gguf_writer.add_key_length(head_dim)
        self.gguf_writer.add_value_length(head_dim)
        self.gguf_writer.add_file_type(self.ftype)

    def find_hparam(self, keys: Iterable[str], optional: bool = False) -> Any:
        if "n_layers" in keys:
            return self.hparams["num_transformer_layers"]

        return super().find_hparam(keys, optional)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # split ff
        if bid is not None and name == f"transformer.layers.{bid}.ffn.proj_1.weight":
            ff_dim = self._ffn_dims[bid]
            yield (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE, bid), data_torch[:ff_dim])
            yield (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP,   bid), data_torch[ff_dim:])
            return

        yield (self.map_tensor_name(name), data_torch)


@Model.register("ArcticForCausalLM")
class ArcticModel(Model):
    model_arch = gguf.MODEL_ARCH.ARCTIC

    def set_vocab(self):
        # The reason for using a custom implementation here is that the
        # snowflake-arctic-instruct model redefined tokens 31998 and 31999 from
        # tokenizer.model and used them as BOS and EOS instead of adding new tokens.
        from sentencepiece import SentencePieceProcessor

        tokenizer_path = self.dir_model / 'tokenizer.model'

        if not tokenizer_path.is_file():
            logger.error(f'Error: Missing {tokenizer_path}')
            sys.exit(1)

        # Read the whole vocabulary from the tokenizer.model file
        tokenizer = SentencePieceProcessor()
        tokenizer.LoadFromFile(str(tokenizer_path))

        vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())

        tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
        scores: list[float] = [-10000.0] * vocab_size
        toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size

        for token_id in range(tokenizer.vocab_size()):
            piece = tokenizer.IdToPiece(token_id)
            text = piece.encode("utf-8")
            score = tokenizer.GetScore(token_id)

            toktype = SentencePieceTokenTypes.NORMAL
            if tokenizer.IsUnknown(token_id):
                toktype = SentencePieceTokenTypes.UNKNOWN
            elif tokenizer.IsControl(token_id):
                toktype = SentencePieceTokenTypes.CONTROL
            elif tokenizer.IsUnused(token_id):
                toktype = SentencePieceTokenTypes.UNUSED
            elif tokenizer.IsByte(token_id):
                toktype = SentencePieceTokenTypes.BYTE

            tokens[token_id] = text
            scores[token_id] = score
            toktypes[token_id] = toktype

        # Use the added_tokens_decoder field from tokenizer_config.json as the source
        # of information about added/redefined tokens and modify them accordingly.
        tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
        if tokenizer_config_file.is_file():
            with open(tokenizer_config_file, "r", encoding="utf-8") as f:
                tokenizer_config_json = json.load(f)

                if "added_tokens_decoder" in tokenizer_config_json:
                    added_tokens_decoder = tokenizer_config_json["added_tokens_decoder"]
                    for token_id, token_json in added_tokens_decoder.items():
                        token_id = int(token_id)
                        if token_id >= vocab_size:
                            logger.debug(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
                            continue

                        token_content = token_json["content"]
                        token_type = SentencePieceTokenTypes.USER_DEFINED
                        token_score = -10000.0

                        # Map unk_token to UNKNOWN, other special tokens to CONTROL
                        # Set the score to 0.0 as in the original tokenizer.model
                        if ("special" in token_json) and token_json["special"]:
                            if token_content == tokenizer_config_json["unk_token"]:
                                token_type = SentencePieceTokenTypes.UNKNOWN
                            else:
                                token_type = SentencePieceTokenTypes.CONTROL
                            token_score = 0.0

                        logger.info(f"Setting added token {token_id} to '{token_content}' (type: {token_type}, score: {token_score:.2f})")
                        tokens[token_id] = token_content.encode("utf-8")
                        toktypes[token_id] = token_type
                        scores[token_id] = token_score

        self.gguf_writer.add_tokenizer_model("llama")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        hparams = self.hparams
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])
        self.gguf_writer.add_rope_dimension_count(hparams["hidden_size"] // hparams["num_attention_heads"])

    _experts: list[dict[str, Tensor]] | None = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        n_head = self.hparams["num_attention_heads"]
        n_kv_head = self.hparams.get("num_key_value_heads")

        if name.endswith("q_proj.weight"):
            data_torch = LlamaModel.permute(data_torch, n_head, n_head)
        if name.endswith("k_proj.weight"):
            data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)

        # process the experts separately
        if name.find("block_sparse_moe.experts") != -1:
            n_experts = self.hparams["num_local_experts"]

            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch
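
            # each expert contributes a w1, w2 and w3 projection, so this layer
            # is complete once n_experts * 3 tensors have been buffered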
            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for wid in ["w1", "w2", "w3"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.{wid}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)

                    merged_name = f"layers.{bid}.feed_forward.experts.{wid}.weight"

                    new_name = self.map_tensor_name(merged_name)

                    tensors.append((new_name, data_torch))
                return tensors
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")


@Model.register("DeepseekV2ForCausalLM")
class DeepseekV2Model(Model):
    model_arch = gguf.MODEL_ARCH.DEEPSEEK2

    def set_vocab(self):
        self._set_vocab_gpt2()

    def set_gguf_parameters(self):
        super().set_gguf_parameters()

        hparams = self.hparams

        self.gguf_writer.add_leading_dense_block_count(hparams["first_k_dense_replace"])
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])
        if "q_lora_rank" in hparams and hparams["q_lora_rank"] is not None:
            self.gguf_writer.add_q_lora_rank(hparams["q_lora_rank"])
        self.gguf_writer.add_kv_lora_rank(hparams["kv_lora_rank"])
        self.gguf_writer.add_key_length(hparams["qk_nope_head_dim"] + hparams["qk_rope_head_dim"])
        self.gguf_writer.add_value_length(hparams["v_head_dim"])
        self.gguf_writer.add_expert_feed_forward_length(hparams["moe_intermediate_size"])
        self.gguf_writer.add_expert_count(hparams["n_routed_experts"])
        self.gguf_writer.add_expert_shared_count(hparams["n_shared_experts"])
        self.gguf_writer.add_expert_weights_scale(hparams["routed_scaling_factor"])
        self.gguf_writer.add_rope_dimension_count(hparams["qk_rope_head_dim"])

        if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]:
            if self.hparams["rope_scaling"].get("type") == "yarn":
                self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
                self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"])
                self.gguf_writer.add_rope_scaling_orig_ctx_len(self.hparams["rope_scaling"]["original_max_position_embeddings"])
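                # YaRN scales attention magnitudes by mscale = 0.1 * ln(s) + 1.0;
                # DeepSeek-V2 generalizes the 0.1 factor with mscale_all_dim, so
                # 0.1 * mscale_all_dim is stored for the runtime to reconstruct it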
                self.gguf_writer.add_rope_scaling_yarn_log_mul(0.1 * hparams["rope_scaling"]["mscale_all_dim"])

    _experts: list[dict[str, Tensor]] | None = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # process the experts separately
        if name.find("mlp.experts") != -1:
            n_experts = self.hparams["n_routed_experts"]
            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for w_name in ["down_proj", "gate_proj", "up_proj"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)

                    merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"

                    new_name = self.map_tensor_name(merged_name)

                    tensors.append((new_name, data_torch))
                return tensors
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")


@Model.register("T5WithLMHeadModel")
@Model.register("T5ForConditionalGeneration")
@Model.register("MT5ForConditionalGeneration")
@Model.register("UMT5ForConditionalGeneration")
class T5Model(Model):
    model_arch = gguf.MODEL_ARCH.T5

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.shared_token_embeddings_found = False

    def set_vocab(self):
        # to avoid TypeError: Descriptors cannot be created directly
        # exception when importing sentencepiece_model_pb2
        os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
        from sentencepiece import SentencePieceProcessor
        from sentencepiece import sentencepiece_model_pb2 as model

        tokenizer_path = self.dir_model / 'tokenizer.model'

        # many older models use the spiece.model tokenizer model filename
        if not tokenizer_path.is_file():
            tokenizer_path = self.dir_model / 'spiece.model'

        if not tokenizer_path.is_file():
            raise FileNotFoundError(f"File not found: {tokenizer_path}")

        sentencepiece_model = model.ModelProto()  # pyright: ignore[reportAttributeAccessIssue]
        sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read())
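        # sentencepiece's TrainerSpec.ModelType enum: UNIGRAM = 1, BPE = 2,
        # WORD = 3, CHAR = 4; only the first two are handled below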
        # some models like Pile-T5 family use BPE tokenizer instead of Unigram
        if sentencepiece_model.trainer_spec.model_type == 2:  # BPE
            # ensure the tokenizer model file name is correct
            assert tokenizer_path.name == 'tokenizer.model'
            return self._set_vocab_sentencepiece()
        else:
            assert sentencepiece_model.trainer_spec.model_type == 1  # UNIGRAM

        add_prefix = sentencepiece_model.normalizer_spec.add_dummy_prefix
        remove_whitespaces = sentencepiece_model.normalizer_spec.remove_extra_whitespaces
        precompiled_charsmap = sentencepiece_model.normalizer_spec.precompiled_charsmap

        tokenizer = SentencePieceProcessor()
        tokenizer.LoadFromFile(str(tokenizer_path))

        vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())

        tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
        scores: list[float] = [-10000.0] * vocab_size
        toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size

        for token_id in range(tokenizer.vocab_size()):
            piece = tokenizer.IdToPiece(token_id)
            text = piece.encode("utf-8")
            score = tokenizer.GetScore(token_id)

            toktype = SentencePieceTokenTypes.NORMAL
            if tokenizer.IsUnknown(token_id):
                toktype = SentencePieceTokenTypes.UNKNOWN
            elif tokenizer.IsControl(token_id):
                toktype = SentencePieceTokenTypes.CONTROL
            elif tokenizer.IsUnused(token_id):
                toktype = SentencePieceTokenTypes.UNUSED
            elif tokenizer.IsByte(token_id):
                toktype = SentencePieceTokenTypes.BYTE

            tokens[token_id] = text
            scores[token_id] = score
            toktypes[token_id] = toktype

        added_tokens_file = self.dir_model / 'added_tokens.json'
        if added_tokens_file.is_file():
            with open(added_tokens_file, "r", encoding="utf-8") as f:
                added_tokens_json = json.load(f)
                for key in added_tokens_json:
                    token_id = added_tokens_json[key]
                    if token_id >= vocab_size:
                        logger.warning(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
                        continue

                    tokens[token_id] = key.encode("utf-8")
                    scores[token_id] = -1000.0
                    toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED

        if vocab_size > len(tokens):
            pad_count = vocab_size - len(tokens)
            logger.debug(f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]")
            for i in range(1, pad_count + 1):
                tokens.append(bytes(f"[PAD{i}]", encoding="utf-8"))
                scores.append(-1000.0)
                toktypes.append(SentencePieceTokenTypes.UNUSED)

        self.gguf_writer.add_tokenizer_model("t5")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)
        self.gguf_writer.add_add_space_prefix(add_prefix)
        self.gguf_writer.add_remove_extra_whitespaces(remove_whitespaces)
        if precompiled_charsmap:
            self.gguf_writer.add_precompiled_charsmap(precompiled_charsmap)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

        self.gguf_writer.add_add_bos_token(False)
        self.gguf_writer.add_add_eos_token(True)

    def set_gguf_parameters(self):
        if (n_ctx := self.find_hparam(["n_positions"], optional=True)) is None:
            logger.warning("Couldn't find context length in config.json, assuming default value of 512")
            n_ctx = 512
        self.gguf_writer.add_context_length(n_ctx)
        self.gguf_writer.add_embedding_length(self.hparams["d_model"])
        self.gguf_writer.add_feed_forward_length(self.hparams["d_ff"])
        self.gguf_writer.add_block_count(self.hparams["num_layers"])
        self.gguf_writer.add_head_count(self.hparams["num_heads"])
        self.gguf_writer.add_key_length(self.hparams["d_kv"])
        self.gguf_writer.add_value_length(self.hparams["d_kv"])
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_relative_attn_buckets_count(self.hparams["relative_attention_num_buckets"])
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_decoder_start_token_id(self.hparams["decoder_start_token_id"])
        self.gguf_writer.add_file_type(self.ftype)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        # T5 based models contain shared token embeddings tensors saved randomly as either "encoder.embed_tokens.weight",
        # "decoder.embed_tokens.weight" or "shared.weight" tensor. In some models there are even multiple of them stored
        # in the safetensors files. We use the first tensor from these three as the token embeddings for both encoder
        # and decoder and ignore the remaining ones.
        if name in ["decoder.embed_tokens.weight", "encoder.embed_tokens.weight", "shared.weight"]:
            if not self.shared_token_embeddings_found:
                name = "shared.weight"
                self.shared_token_embeddings_found = True
            else:
                logger.debug(f"Skipping shared tensor {name!r} in safetensors so that convert can end normally.")
                return []

        return [(self.map_tensor_name(name), data_torch)]


@Model.register("JAISLMHeadModel")
class JaisModel(Model):
    model_arch = gguf.MODEL_ARCH.JAIS

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # SwiGLU activation
        assert self.hparams["activation_function"] == "swiglu"
        # ALiBi position embedding
        assert self.hparams["position_embedding_type"] == "alibi"

        # Embeddings scale
        self.embeddings_scale = 1.0
        # note: For some JAIS flavors, output is tied to (same as) wte in original model
        self.output_is_wte = False
        if 'mup_embeddings_scale' in self.hparams:
            self.output_is_wte = True  # Hack (?)
            self.embeddings_scale = self.hparams['mup_embeddings_scale']
        elif 'embeddings_scale' in self.hparams:
            self.embeddings_scale = self.hparams['embeddings_scale']
        else:
            assert False

        self.width_scale = 1.0
        if 'mup_output_alpha' in self.hparams:
            assert 'mup_width_scale' in self.hparams
            self.width_scale = self.hparams['mup_output_alpha'] * self.hparams['mup_width_scale']
        elif 'width_scale' in self.hparams:
            self.width_scale = self.hparams['width_scale']
        else:
            assert False

        self.max_alibi_bias = 8.0

    def set_vocab(self):
        self._set_vocab_gpt2()

    def set_gguf_parameters(self):
        self.gguf_writer.add_block_count(self.hparams["n_layer"])
        self.gguf_writer.add_context_length(self.hparams["n_positions"])
        self.gguf_writer.add_embedding_length(self.hparams["n_embd"])
        self.gguf_writer.add_feed_forward_length(self.hparams["n_inner"])
        self.gguf_writer.add_head_count(self.hparams["n_head"])
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        tensors: list[tuple[str, Tensor]] = []

        # we don't need these
        if name.endswith((".attn.bias")):
            return tensors

        if name.endswith(("relative_pe.slopes")):
            # Calculate max ALiBi bias (this is the inverse of the ALiBi calculation)
            # Some other models have max_alibi_bias spelled out explicitly in the hyperparams,
            # but Jais's PyTorch model simply precalculates the slope values and places them
            # in relative_pe.slopes
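            # standard ALiBi gives head i the slope 2^(-max_bias * i / n), so the
            # first slope 2^(-max_bias / n) can be inverted: e.g. with n_head = 16
            # and slopes[0] = 2^-0.5, max_alibi_bias = -log2(2^-0.5) * 16 = 8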
            n_head_closest_log2 = 2 ** math.floor(math.log2(self.hparams["n_head"]))
            first_val = float(data_torch[0].item())
            self.max_alibi_bias = -round(math.log2(first_val) * n_head_closest_log2)

            return tensors

        if name.endswith((".c_attn.weight", ".c_proj.weight", ".c_fc.weight", ".c_fc2.weight")):
            data_torch = data_torch.transpose(1, 0)

        new_name = self.map_tensor_name(name)

        if new_name == self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD):
            tensors.append((new_name, data_torch * self.embeddings_scale))
            if self.output_is_wte:
                tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT), data_torch * self.width_scale))
        elif new_name == self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT):
            assert not self.output_is_wte
            tensors.append((new_name, data_torch * self.width_scale))
        else:
            tensors.append((new_name, data_torch))

        return tensors

    def prepare_tensors(self):
        super().prepare_tensors()
        self.gguf_writer.add_max_alibi_bias(self.max_alibi_bias)


@Model.register("ChatGLMModel", "ChatGLMForConditionalGeneration")
class ChatGLMModel(Model):
    model_arch = gguf.MODEL_ARCH.CHATGLM

    def set_vocab_chatglm3(self):
        dir_model = self.dir_model
        hparams = self.hparams
        tokens: list[bytes] = []
        toktypes: list[int] = []
        scores: list[float] = []

        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(dir_model, trust_remote_code=True)
        vocab_size = hparams.get("padded_vocab_size", len(tokenizer.get_vocab()))
        assert max(tokenizer.get_vocab().values()) < vocab_size
        role_special_tokens = ["<|system|>", "<|user|>", "<|assistant|>", "<|observation|>"]
        special_tokens = ["[MASK]", "[gMASK]", "[sMASK]", "sop", "eop"] + role_special_tokens
        for token_id in range(vocab_size):
            piece = tokenizer._convert_id_to_token(token_id)
            if token_id == 0:
                piece = "<unk>"
            elif token_id == 1:
                piece = "<bos>"
            elif token_id == 2:
                piece = "<eos>"

            text = piece.encode("utf-8")
            score = 0.0
            # Referencing the tokenizer's Python implementation (https://huggingface.co/THUDM/chatglm3-6b/blob/main/tokenization_chatglm.py),
            # a score is only valid when the token id is less than tokenizer.tokenizer.sp_model.vocab_size()
            if len(piece) != 0 and token_id < tokenizer.tokenizer.sp_model.vocab_size():
                score = tokenizer.tokenizer.sp_model.get_score(token_id)

            if token_id >= tokenizer.tokenizer.sp_model.vocab_size():
                if piece in special_tokens:
                    toktype = SentencePieceTokenTypes.CONTROL
                elif len(piece) == 0:
                    text = f"[PAD{token_id}]".encode("utf-8")
                    toktype = SentencePieceTokenTypes.UNUSED
                else:
                    toktype = SentencePieceTokenTypes.USER_DEFINED
                tokens.append(text)
                scores.append(score)
                toktypes.append(toktype)
                continue

            toktype = SentencePieceTokenTypes.NORMAL
            if tokenizer.tokenizer.sp_model.is_unknown(token_id):
                toktype = SentencePieceTokenTypes.UNKNOWN
            elif tokenizer.tokenizer.sp_model.is_control(token_id):
                toktype = SentencePieceTokenTypes.CONTROL
            elif tokenizer.tokenizer.sp_model.is_unused(token_id):
                toktype = SentencePieceTokenTypes.UNUSED
            elif tokenizer.tokenizer.sp_model.is_byte(token_id):
                toktype = SentencePieceTokenTypes.BYTE

            tokens.append(text)
            scores.append(score)
            toktypes.append(toktype)

        self.gguf_writer.add_tokenizer_model("llama")
        # glm3 needs prefix and suffix formatted as:
        # prompt = "[gMASK]sop<|user|>\n" + prompt + "<|assistant|>"
        self.gguf_writer.add_tokenizer_pre("chatglm-spm")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

    @staticmethod
    def token_bytes_to_string(b):
        from transformers.models.gpt2.tokenization_gpt2 import bytes_to_unicode
        byte_encoder = bytes_to_unicode()
        return ''.join([byte_encoder[ord(char)] for char in b.decode('latin-1')])

    @staticmethod
    def bpe(mergeable_ranks: dict[bytes, int], token: bytes, max_rank: int | None = None) -> list[bytes]:
        parts = [bytes([b]) for b in token]
        while True:
            min_idx = None
            min_rank = None
            for i, pair in enumerate(zip(parts[:-1], parts[1:])):
                rank = mergeable_ranks.get(pair[0] + pair[1])
                if rank is not None and (min_rank is None or rank < min_rank):
                    min_idx = i
                    min_rank = rank
            if min_rank is None or (max_rank is not None and min_rank >= max_rank):
                break
            assert min_idx is not None
            parts = parts[:min_idx] + [parts[min_idx] + parts[min_idx + 1]] + parts[min_idx + 2:]
        return parts
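
    # bpe() replays the byte-pair merges in rank order; calling it with
    # max_rank set to a token's own rank stops just before that token would
    # be formed, recovering the (usually two) pieces its merge rule joins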

    def set_vocab(self):
        if "THUDM/chatglm3-6b" in self.hparams.get("_name_or_path", ""):
            self.set_vocab_chatglm3()
            return

        dir_model = self.dir_model
        hparams = self.hparams
        tokens: list[str] = []
        toktypes: list[int] = []

        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(dir_model, trust_remote_code=True)
        vocab_size = hparams["padded_vocab_size"]
        assert max(tokenizer.get_vocab().values()) < vocab_size

        tokpre = self.get_vocab_base_pre(tokenizer)

        merges = []
        vocab = {}
        mergeable_ranks = tokenizer.mergeable_ranks
        for token, rank in mergeable_ranks.items():
            vocab[ChatGLMModel.token_bytes_to_string(token)] = rank
            if len(token) == 1:
                continue
            merged = ChatGLMModel.bpe(mergeable_ranks, token, max_rank=rank)
            assert len(merged) >= 2 and len(merged) <= 7
            merges.append(' '.join(map(ChatGLMModel.token_bytes_to_string, merged)))

        # for this kind of tokenizer, added_vocab is not a subset of vocab, so they need to be combined
        added_vocab = tokenizer.get_added_vocab()
        reverse_vocab = {id_ : encoded_tok for encoded_tok, id_ in {**vocab, **added_vocab}.items()}

        for i in range(vocab_size):
            if i not in reverse_vocab:
                tokens.append(f"[PAD{i}]")
                toktypes.append(gguf.TokenType.UNUSED)
            elif reverse_vocab[i] in added_vocab:
                tokens.append(reverse_vocab[i])
                if tokenizer.added_tokens_decoder[i].special:
                    toktypes.append(gguf.TokenType.CONTROL)
                else:
                    toktypes.append(gguf.TokenType.USER_DEFINED)
            else:
                tokens.append(reverse_vocab[i])
                toktypes.append(gguf.TokenType.NORMAL)

        self.gguf_writer.add_tokenizer_model("gpt2")
        self.gguf_writer.add_tokenizer_pre(tokpre)
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(dir_model, load_merges=False)
        special_vocab.merges = merges
        # only add special tokens when they were not already loaded from config.json
        special_vocab._set_special_token("eos", tokenizer.get_added_vocab()["<|endoftext|>"])
        special_vocab._set_special_token("eot", tokenizer.get_added_vocab()["<|user|>"])
        # this one is usually not in config.json anyway
        special_vocab._set_special_token("unk", tokenizer.get_added_vocab()["<|endoftext|>"])
        special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed"))
        n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads"))
        n_head_kv = self.hparams.get("multi_query_group_num", n_head)
        self.gguf_writer.add_context_length(self.hparams.get("seq_length", n_embed))
        self.gguf_writer.add_embedding_length(n_embed)
        self.gguf_writer.add_feed_forward_length(self.hparams.get("ffn_hidden_size", 4 * n_embed))
        self.gguf_writer.add_block_count(self.hparams["num_layers"])
        self.gguf_writer.add_head_count(n_head)
        self.gguf_writer.add_head_count_kv(n_head_kv)
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layernorm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)
        self.gguf_writer.add_rope_dimension_count(64)
        self.gguf_writer.add_add_bos_token(False)
        rope_freq = 10000
        if "rope_ratio" in self.hparams:
            rope_freq = rope_freq * self.hparams["rope_ratio"]
        self.gguf_writer.add_rope_freq_base(rope_freq)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        if name.endswith(".rotary_pos_emb.inv_freq"):
            return []

        name = name.removeprefix("transformer.")
        return [(self.map_tensor_name(name), data_torch)]


###### CONVERSION LOGIC ######


# tree of lazy tensors
class LazyTorchTensor(gguf.LazyBase):
    _tensor_type = torch.Tensor

    # to keep the type-checker happy
    dtype: torch.dtype
    shape: torch.Size

    # only used when converting a torch.Tensor to a np.ndarray
    _dtype_map: dict[torch.dtype, type] = {
        torch.float16: np.float16,
        torch.float32: np.float32,
    }

    # used for safetensors slices
    # ref: https://github.com/huggingface/safetensors/blob/079781fd0dc455ba0fe851e2b4507c33d0c0d407/bindings/python/src/lib.rs#L1046
    # TODO: uncomment U64, U32, and U16, ref: https://github.com/pytorch/pytorch/issues/58734
    _dtype_str_map: dict[str, torch.dtype] = {
        "F64": torch.float64,
        "F32": torch.float32,
        "BF16": torch.bfloat16,
        "F16": torch.float16,
        # "U64": torch.uint64,
        "I64": torch.int64,
        # "U32": torch.uint32,
        "I32": torch.int32,
        # "U16": torch.uint16,
        "I16": torch.int16,
        "U8": torch.uint8,
        "I8": torch.int8,
        "BOOL": torch.bool,
        "F8_E4M3": torch.float8_e4m3fn,
        "F8_E5M2": torch.float8_e5m2,
    }

    def numpy(self) -> gguf.LazyNumpyTensor:
        dtype = self._dtype_map[self.dtype]
        return gguf.LazyNumpyTensor(
            meta=gguf.LazyNumpyTensor.meta_with_dtype_and_shape(dtype, self.shape),
            args=(self,),
            func=(lambda s: s.numpy())
        )

    @classmethod
    def meta_with_dtype_and_shape(cls, dtype: torch.dtype, shape: tuple[int, ...]) -> Tensor:
        return torch.empty(size=shape, dtype=dtype, device="meta")

    @classmethod
    def from_safetensors_slice(cls, st_slice: Any) -> Tensor:
        dtype = cls._dtype_str_map[st_slice.get_dtype()]
        shape: tuple[int, ...] = tuple(st_slice.get_shape())
        lazy = cls(meta=cls.meta_with_dtype_and_shape(dtype, shape), args=(st_slice,), func=lambda s: s[:])
        return cast(torch.Tensor, lazy)

    @classmethod
    def __torch_function__(cls, func, types, args=(), kwargs=None):
        del types  # unused

        if kwargs is None:
            kwargs = {}

        if func is torch.Tensor.numpy:
            return args[0].numpy()

        return cls._wrap_fn(func)(*args, **kwargs)


def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(
        description="Convert a huggingface model to a GGML compatible file")
    parser.add_argument(
        "--vocab-only", action="store_true",
        help="extract only the vocab",
    )
    parser.add_argument(
        "--outfile", type=Path,
        help="path to write to; default: based on input. {ftype} will be replaced by the outtype.",
    )
    parser.add_argument(
        "--outtype", type=str, choices=["f32", "f16", "bf16", "q8_0", "auto"], default="f16",
        help="output format - use f32 for float32, f16 for float16, bf16 for bfloat16, q8_0 for Q8_0, auto for the highest-fidelity 16-bit float type depending on the first loaded tensor type",
    )
    parser.add_argument(
        "--bigendian", action="store_true",
        help="model is executed on big endian machine",
    )
    parser.add_argument(
        "model", type=Path,
        help="directory containing model file",
    )
    parser.add_argument(
        "--use-temp-file", action="store_true",
        help="use the tempfile library while processing (helpful when running out of memory, process killed)",
    )
    parser.add_argument(
        "--no-lazy", action="store_true",
        help="use more RAM by computing all outputs before writing (use in case lazy evaluation is broken)",
    )
    parser.add_argument(
        "--model-name", type=str, default=None,
        help="name of the model",
    )
    parser.add_argument(
        "--verbose", action="store_true",
        help="increase output verbosity",
    )
    parser.add_argument(
        "--split-max-tensors", type=int, default=0,
        help="max tensors in each split",
    )
    parser.add_argument(
        "--split-max-size", type=str, default="0",
        help="max size per split N(M|G)",
    )
    parser.add_argument(
        "--dry-run", action="store_true",
        help="only print out a split plan and exit, without writing any new files",
    )
    parser.add_argument(
        "--no-tensor-first-split", action="store_true",
        help="do not add tensors to the first split (disabled by default)"
    )
    parser.add_argument(
        "--metadata", type=Path,
        help="Specify the path for an authorship metadata override file"
    )

    return parser.parse_args()


def split_str_to_n_bytes(split_str: str) -> int:
    if split_str.endswith("K"):
        n = int(split_str[:-1]) * 1000
    elif split_str.endswith("M"):
        n = int(split_str[:-1]) * 1000 * 1000
    elif split_str.endswith("G"):
        n = int(split_str[:-1]) * 1000 * 1000 * 1000
    elif split_str.isnumeric():
        n = int(split_str)
    else:
        raise ValueError(f"Invalid split size: {split_str}, must be a number, optionally followed by K, M, or G")

    if n < 0:
        raise ValueError(f"Invalid split size: {split_str}, must be positive")

    return n
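
# e.g. split_str_to_n_bytes("250K") == 250_000 and split_str_to_n_bytes("2G")
# == 2_000_000_000; sizes use decimal (SI) multiples, not binary KiB/MiB/GiB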


def main() -> None:
    args = parse_args()

    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)

    dir_model = args.model

    if not dir_model.is_dir():
        logger.error(f'Error: {args.model} is not a directory')
        sys.exit(1)

    ftype_map: dict[str, gguf.LlamaFileType] = {
        "f32": gguf.LlamaFileType.ALL_F32,
        "f16": gguf.LlamaFileType.MOSTLY_F16,
        "bf16": gguf.LlamaFileType.MOSTLY_BF16,
        "q8_0": gguf.LlamaFileType.MOSTLY_Q8_0,
        "auto": gguf.LlamaFileType.GUESSED,
    }

    is_split = args.split_max_tensors > 0 or args.split_max_size != "0"
    if args.use_temp_file and is_split:
        logger.error("Error: Cannot use temp file when splitting")
        sys.exit(1)

    if args.outfile is not None:
        fname_out = args.outfile
    else:
        fname_out = dir_model

    logger.info(f"Loading model: {dir_model.name}")

    hparams = Model.load_hparams(dir_model)

    with torch.inference_mode():
        output_type = ftype_map[args.outtype]
        model_architecture = hparams["architectures"][0]

        try:
            model_class = Model.from_model_architecture(model_architecture)
        except NotImplementedError:
            logger.error(f"Model {model_architecture} is not supported")
            sys.exit(1)

        model_instance = model_class(dir_model=dir_model, ftype=output_type, fname_out=fname_out,
                                     is_big_endian=args.bigendian, use_temp_file=args.use_temp_file,
                                     eager=args.no_lazy,
                                     metadata_override=args.metadata, model_name=args.model_name,
                                     split_max_tensors=args.split_max_tensors,
                                     split_max_size=split_str_to_n_bytes(args.split_max_size), dry_run=args.dry_run,
                                     small_first_shard=args.no_tensor_first_split)

        if args.vocab_only:
            logger.info("Exporting model vocab...")
            model_instance.write_vocab()
            logger.info(f"Model vocab successfully exported to {model_instance.fname_out}")
        else:
            logger.info("Exporting model...")
            model_instance.write()
            out_path = f"{model_instance.fname_out.parent}{os.sep}" if is_split else model_instance.fname_out
            logger.info(f"Model successfully exported to {out_path}")


if __name__ == '__main__':
    main()