#!/usr/bin/env python3
# -*- coding: utf-8 -*-

from __future__ import annotations

import logging
import argparse
import contextlib
import json
import os
import re
import sys
from enum import IntEnum
from pathlib import Path
from hashlib import sha256
from typing import TYPE_CHECKING, Any, Callable, ContextManager, Iterable, Iterator, Literal, Sequence, TypeVar, cast

import math
import numpy as np
import torch

if TYPE_CHECKING:
    from torch import Tensor

if 'NO_LOCAL_GGUF' not in os.environ:
    sys.path.insert(1, str(Path(__file__).parent / 'gguf-py'))
import gguf

logger = logging.getLogger("hf-to-gguf")
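
# This script converts a Hugging Face model directory (config.json plus
# safetensors or PyTorch .bin shards) into a single GGUF file for llama.cpp.
# A typical invocation looks like this (the model path is only an example):
#
#   python convert_hf_to_gguf.py ./models/Meta-Llama-3-8B --outtype f16
#
# Each supported architecture subclasses `Model` below and registers itself
# under its Hugging Face architecture name via `@Model.register(...)`.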

###### MODEL DEFINITIONS ######

class SentencePieceTokenTypes(IntEnum):
    NORMAL = 1
    UNKNOWN = 2
    CONTROL = 3
    USER_DEFINED = 4
    UNUSED = 5
    BYTE = 6


AnyModel = TypeVar("AnyModel", bound="type[Model]")


class Model:
    _model_classes: dict[str, type[Model]] = {}

    dir_model: Path
    ftype: gguf.LlamaFileType
    fname_out: Path | None
    is_big_endian: bool
    endianess: gguf.GGUFEndian
    use_temp_file: bool
    lazy: bool
    part_names: list[str]
    is_safetensors: bool
    hparams: dict[str, Any]
    block_count: int
    tensor_map: gguf.TensorNameMap
    tensor_names: set[str] | None
    gguf_writer: gguf.GGUFWriter
    model_name: str | None
    metadata_override: Path | None

    # subclasses should define this!
    model_arch: gguf.MODEL_ARCH

    def __init__(self, dir_model: Path, ftype: gguf.LlamaFileType, fname_out: Path | None, is_big_endian: bool = False,
                 use_temp_file: bool = False, eager: bool = False,
                 metadata_override: Path | None = None, model_name: str | None = None,
                 split_max_tensors: int = 0, split_max_size: int = 0, dry_run: bool = False, small_first_shard: bool = False):
        if type(self) is Model:
            raise TypeError(f"{type(self).__name__!r} should not be directly instantiated")

        self.dir_model = dir_model
        self.ftype = ftype
        self.fname_out = fname_out
        self.is_big_endian = is_big_endian
        self.endianess = gguf.GGUFEndian.BIG if is_big_endian else gguf.GGUFEndian.LITTLE
        self.use_temp_file = use_temp_file
        self.lazy = not eager
        self.part_names = Model.get_model_part_names(self.dir_model, "model", ".safetensors")
        self.is_safetensors = len(self.part_names) > 0
        if not self.is_safetensors:
            self.part_names = Model.get_model_part_names(self.dir_model, "pytorch_model", ".bin")
        self.hparams = Model.load_hparams(self.dir_model)
        self.block_count = self.find_hparam(["n_layers", "num_hidden_layers", "n_layer", "num_layers"])
        self.tensor_map = gguf.get_tensor_name_map(self.model_arch, self.block_count)
        self.tensor_names = None
        self.metadata_override = metadata_override
        self.model_name = model_name

        # Apply heuristics to figure out typical tensor encoding based on first layer tensor encoding type
        if self.ftype == gguf.LlamaFileType.GUESSED:
            # NOTE: can't use field "torch_dtype" in config.json, because some finetunes lie.
            _, first_tensor = next(self.get_tensors())
            if first_tensor.dtype == torch.float16:
                logger.info(f"choosing --outtype f16 from first tensor type ({first_tensor.dtype})")
                self.ftype = gguf.LlamaFileType.MOSTLY_F16
            else:
                logger.info(f"choosing --outtype bf16 from first tensor type ({first_tensor.dtype})")
                self.ftype = gguf.LlamaFileType.MOSTLY_BF16

        # Configure GGUF Writer
        self.gguf_writer = gguf.GGUFWriter(path=None, arch=gguf.MODEL_ARCH_NAMES[self.model_arch], endianess=self.endianess, use_temp_file=self.use_temp_file,
                                           split_max_tensors=split_max_tensors, split_max_size=split_max_size, dry_run=dry_run, small_first_shard=small_first_shard)

    @classmethod
    def __init_subclass__(cls):
        # can't use an abstract property, because overriding it without type errors
        # would require using decorated functions instead of simply defining the property
        if "model_arch" not in cls.__dict__:
            raise TypeError(f"Missing property 'model_arch' for {cls.__name__!r}")

    def find_hparam(self, keys: Iterable[str], optional: bool = False) -> Any:
        key = next((k for k in keys if k in self.hparams), None)
        if key is not None:
            return self.hparams[key]
        if optional:
            return None
        raise KeyError(f"could not find any of: {keys}")
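
    # `find_hparam` papers over the different names Hugging Face configs use
    # for the same hyperparameter, e.g. (illustrative calls):
    #
    #   self.find_hparam(["num_hidden_layers", "n_layer"])  # first matching key wins
    #   self.find_hparam(["rope_theta"], optional=True)     # None when absent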

    def set_vocab(self):
        self._set_vocab_gpt2()

    def get_tensors(self) -> Iterator[tuple[str, Tensor]]:
        tensor_names_from_parts: set[str] = set()

        if len(self.part_names) > 1:
            self.tensor_names = set()
            index_name = "model.safetensors" if self.is_safetensors else "pytorch_model.bin"
            index_name += ".index.json"
            logger.info(f"gguf: loading model weight map from '{index_name}'")
            with open(self.dir_model / index_name, "r", encoding="utf-8") as f:
                index: dict[str, Any] = json.load(f)
                weight_map = index.get("weight_map")
                if weight_map is None or not isinstance(weight_map, dict):
                    raise ValueError(f"Can't load 'weight_map' from {index_name!r}")
                self.tensor_names.update(weight_map.keys())
        else:
            self.tensor_names = tensor_names_from_parts

        for part_name in self.part_names:
            logger.info(f"gguf: loading model part '{part_name}'")
            ctx: ContextManager[Any]
            if self.is_safetensors:
                from safetensors import safe_open
                ctx = cast(ContextManager[Any], safe_open(self.dir_model / part_name, framework="pt", device="cpu"))
            else:
                ctx = contextlib.nullcontext(torch.load(str(self.dir_model / part_name), map_location="cpu", mmap=True, weights_only=True))

            with ctx as model_part:
                tensor_names_from_parts.update(model_part.keys())

                for name in model_part.keys():
                    if self.is_safetensors:
                        if self.lazy:
                            data = model_part.get_slice(name)
                            data = LazyTorchTensor.from_safetensors_slice(data)
                        else:
                            data = model_part.get_tensor(name)
                    else:
                        data = model_part[name]
                        if self.lazy:
                            data = LazyTorchTensor.from_eager(data)
                    yield name, data

        # only verify tensor name presence; it doesn't matter if they are not in the right files
        if len(sym_diff := tensor_names_from_parts.symmetric_difference(self.tensor_names)) > 0:
            raise ValueError(f"Mismatch between weight map and model parts for tensor names: {sym_diff}")
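
    # When `self.lazy` is set (the default), tensors are wrapped in
    # `LazyTorchTensor` (defined further down in this file) so shards are read
    # and converted one tensor at a time instead of materializing the whole
    # checkpoint in memory; `eager=True` (the `--no-lazy` flag) loads everything
    # up front.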

    def format_tensor_name(self, key: gguf.MODEL_TENSOR, bid: int | None = None, suffix: str = ".weight") -> str:
        if key not in gguf.MODEL_TENSORS[self.model_arch]:
            raise ValueError(f"Missing {key!r} for MODEL_TENSORS of {self.model_arch!r}")
        name: str = gguf.TENSOR_NAMES[key]
        if "{bid}" in name:
            assert bid is not None
            name = name.format(bid=bid)
        return name + suffix

    def match_model_tensor_name(self, name: str, key: gguf.MODEL_TENSOR, bid: int | None, suffix: str = ".weight") -> bool:
        if key not in gguf.MODEL_TENSORS[self.model_arch]:
            return False
        key_name: str = gguf.TENSOR_NAMES[key]
        if "{bid}" in key_name:
            if bid is None:
                return False
            key_name = key_name.format(bid=bid)
        else:
            if bid is not None:
                return False
        return name == (key_name + suffix)

    def map_tensor_name(self, name: str, try_suffixes: Sequence[str] = (".weight", ".bias")) -> str:
        new_name = self.tensor_map.get_name(key=name, try_suffixes=try_suffixes)
        if new_name is None:
            raise ValueError(f"Can not map tensor {name!r}")
        return new_name

    def set_gguf_parameters(self):
        self.gguf_writer.add_block_count(self.block_count)

        if (n_ctx := self.find_hparam(["max_position_embeddings", "n_ctx"], optional=True)) is not None:
            self.gguf_writer.add_context_length(n_ctx)
            logger.info(f"gguf: context length = {n_ctx}")

        n_embd = self.find_hparam(["hidden_size", "n_embd"])
        self.gguf_writer.add_embedding_length(n_embd)
        logger.info(f"gguf: embedding length = {n_embd}")

        if (n_ff := self.find_hparam(["intermediate_size", "n_inner"], optional=True)) is not None:
            self.gguf_writer.add_feed_forward_length(n_ff)
            logger.info(f"gguf: feed forward length = {n_ff}")

        n_head = self.find_hparam(["num_attention_heads", "n_head"])
        self.gguf_writer.add_head_count(n_head)
        logger.info(f"gguf: head count = {n_head}")

        if (n_head_kv := self.hparams.get("num_key_value_heads")) is not None:
            self.gguf_writer.add_head_count_kv(n_head_kv)
            logger.info(f"gguf: key-value head count = {n_head_kv}")

        if (rope_theta := self.hparams.get("rope_theta")) is not None:
            self.gguf_writer.add_rope_freq_base(rope_theta)
            logger.info(f"gguf: rope theta = {rope_theta}")
        if (f_rms_eps := self.hparams.get("rms_norm_eps")) is not None:
            self.gguf_writer.add_layer_norm_rms_eps(f_rms_eps)
            logger.info(f"gguf: rms norm epsilon = {f_rms_eps}")
        if (f_norm_eps := self.find_hparam(["layer_norm_eps", "layer_norm_epsilon", "norm_epsilon"], optional=True)) is not None:
            self.gguf_writer.add_layer_norm_eps(f_norm_eps)
            logger.info(f"gguf: layer norm epsilon = {f_norm_eps}")
        if (n_experts := self.hparams.get("num_local_experts")) is not None:
            self.gguf_writer.add_expert_count(n_experts)
            logger.info(f"gguf: expert count = {n_experts}")
        if (n_experts_used := self.hparams.get("num_experts_per_tok")) is not None:
            self.gguf_writer.add_expert_used_count(n_experts_used)
            logger.info(f"gguf: experts used count = {n_experts_used}")

        self.gguf_writer.add_file_type(self.ftype)
        logger.info(f"gguf: file type = {self.ftype}")

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        return [(self.map_tensor_name(name), data_torch)]

    def extra_f32_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool:
        del name, new_name, bid, n_dims  # unused

        return False

    def extra_f16_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool:
        del name, new_name, bid, n_dims  # unused

        return False
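
    # `modify_tensors`, `extra_f32_tensors` and `extra_f16_tensors` are the main
    # hooks for subclasses: the first renames/reshapes tensors, the other two
    # force a tensor to stay f32 or become f16 regardless of the requested file
    # type (when both fire, f32 wins in `prepare_tensors` below).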

    def prepare_tensors(self):
        max_name_len = max(len(s) for _, s in self.tensor_map.mapping.values()) + len(".weight,")

        for name, data_torch in self.get_tensors():
            # we don't need these
            if name.endswith((".attention.masked_bias", ".attention.bias", ".rotary_emb.inv_freq")):
                continue

            old_dtype = data_torch.dtype

            # convert any unsupported data types to float32
            if data_torch.dtype not in (torch.float16, torch.float32):
                data_torch = data_torch.to(torch.float32)

            # use the first number-like part of the tensor name as the block id
            bid = None
            for part in name.split("."):
                if part.isdecimal():
                    bid = int(part)
                    break

            for new_name, data in ((n, d.squeeze().numpy()) for n, d in self.modify_tensors(data_torch, name, bid)):
                data: np.ndarray  # type hint
                n_dims = len(data.shape)
                data_dtype = data.dtype
                data_qtype: gguf.GGMLQuantizationType | None = None

                # when both are True, f32 should win
                extra_f32 = self.extra_f32_tensors(name, new_name, bid, n_dims)
                extra_f16 = self.extra_f16_tensors(name, new_name, bid, n_dims)

                # Most of the codebase that takes in 1D tensors or norms only handles F32 tensors
                # Conditions should closely match those in llama_model_quantize_internal in llama.cpp
                extra_f32 = any(cond for cond in (
                    extra_f32,
                    n_dims == 1,
                    new_name.endswith("_norm.weight"),
                ))

                # Some tensor types are always in float32
                extra_f32 = extra_f32 or any(self.match_model_tensor_name(new_name, key, bid) for key in (
                    gguf.MODEL_TENSOR.FFN_GATE_INP,
                    gguf.MODEL_TENSOR.POS_EMBD,
                    gguf.MODEL_TENSOR.TOKEN_TYPES,
                ))

                # if f16 desired, convert any float32 2-dim weight tensors to float16
                extra_f16 = any(cond for cond in (
                    extra_f16,
                    (name.endswith(".weight") and n_dims >= 2),
                ))

                if self.ftype != gguf.LlamaFileType.ALL_F32 and extra_f16 and not extra_f32:
                    if self.ftype == gguf.LlamaFileType.MOSTLY_BF16:
                        data = gguf.quantize_bf16(data)
                        assert data.dtype == np.int16
                        data_qtype = gguf.GGMLQuantizationType.BF16

                    elif self.ftype == gguf.LlamaFileType.MOSTLY_Q8_0 and gguf.can_quantize_to_q8_0(data):
                        data = gguf.quantize_q8_0(data)
                        assert data.dtype == np.uint8
                        data_qtype = gguf.GGMLQuantizationType.Q8_0

                    else:  # default to float16 for quantized tensors
                        if data_dtype != np.float16:
                            data = data.astype(np.float16)
                        data_qtype = gguf.GGMLQuantizationType.F16

                if data_qtype is None:  # by default, convert to float32
                    if data_dtype != np.float32:
                        data = data.astype(np.float32)
                    data_qtype = gguf.GGMLQuantizationType.F32

                shape = gguf.quant_shape_from_byte_shape(data.shape, data_qtype) if data.dtype == np.uint8 else data.shape

                # reverse shape to make it similar to the internal ggml dimension order
                shape_str = f"{{{', '.join(str(n) for n in reversed(shape))}}}"

                # n_dims is implicit in the shape
                logger.info(f"{f'%-{max_name_len}s' % f'{new_name},'} {old_dtype} --> {data_qtype.name}, shape = {shape_str}")

                self.gguf_writer.add_tensor(new_name, data, raw_dtype=data_qtype)
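
    # Summary of the per-tensor dtype decision above:
    #   f32           - 1D tensors, *_norm.weight, FFN_GATE_INP / POS_EMBD /
    #                   TOKEN_TYPES, anything flagged via extra_f32_tensors,
    #                   or when --outtype f32 was requested
    #   bf16/q8_0/f16 - the remaining 2D+ ".weight" tensors, according to the
    #                   requested file type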

    def set_type(self):
        self.gguf_writer.add_type(gguf.GGUFType.MODEL)

    def prepare_metadata(self, vocab_only: bool):
        total_params, shared_params, expert_params, expert_count = self.gguf_writer.get_total_parameter_count()

        self.metadata = gguf.Metadata.load(self.metadata_override, self.dir_model, self.model_name, total_params)

        # Fallback to model directory name if metadata name is still missing
        if self.metadata.name is None:
            self.metadata.name = self.dir_model.name

        # Generate parameter weight class (useful for leader boards) if not yet determined
        if self.metadata.size_label is None and total_params > 0:
            self.metadata.size_label = gguf.size_label(total_params, shared_params, expert_params, expert_count)

        # Extract the encoding scheme from the file type name. e.g. 'gguf.LlamaFileType.MOSTLY_Q8_0' --> 'Q8_0'
        output_type: str = self.ftype.name.partition("_")[2]

        # Filename Output
        # Note: `not is_dir()` is used because `.is_file()` will not detect
        #       file template strings as it doesn't actually exist as a file
        if self.fname_out is not None and not self.fname_out.is_dir():
            # Output path is a custom defined templated filename
            # Process templated file name with the output ftype, useful with the "auto" ftype
            self.fname_out = self.fname_out.parent / gguf.fill_templated_filename(self.fname_out.name, output_type)
        else:
            # Generate default filename based on model specification and available metadata
            if not vocab_only:
                fname_default: str = gguf.naming_convention(self.metadata.name, self.metadata.basename, self.metadata.finetune, self.metadata.version, self.metadata.size_label, output_type, model_type="LoRA" if total_params < 0 else None)
            else:
                fname_default: str = gguf.naming_convention(self.metadata.name, self.metadata.basename, self.metadata.finetune, self.metadata.version, size_label=None, output_type=None, model_type="vocab")

            # Check if preferred output directory path was provided
            if self.fname_out is not None and self.fname_out.is_dir():
                # output path is a directory
                self.fname_out = self.fname_out / f"{fname_default}.gguf"
            else:
                # output in the same directory as the model by default
                self.fname_out = self.dir_model / f"{fname_default}.gguf"

        self.set_type()

        logger.info("Set meta model")
        self.metadata.set_gguf_meta_model(self.gguf_writer)

        logger.info("Set model parameters")
        self.set_gguf_parameters()

        logger.info("Set model tokenizer")
        self.set_vocab()

        logger.info("Set model quantization version")
        self.gguf_writer.add_quantization_version(gguf.GGML_QUANT_VERSION)

    def write(self):
        self.prepare_tensors()
        self.prepare_metadata(vocab_only=False)
        self.gguf_writer.write_header_to_file(path=self.fname_out)
        self.gguf_writer.write_kv_data_to_file()
        self.gguf_writer.write_tensors_to_file(progress=True)
        self.gguf_writer.close()

    def write_vocab(self):
        if len(self.gguf_writer.tensors) != 1:
            raise ValueError('Splitting the vocabulary is not supported')

        self.prepare_metadata(vocab_only=True)
        self.gguf_writer.write_header_to_file(path=self.fname_out)
        self.gguf_writer.write_kv_data_to_file()
        self.gguf_writer.close()

    @staticmethod
    def get_model_part_names(dir_model: Path, prefix: str, suffix: str) -> list[str]:
        part_names: list[str] = []
        for filename in os.listdir(dir_model):
            if filename.startswith(prefix) and filename.endswith(suffix):
                part_names.append(filename)

        part_names.sort()

        return part_names

    @staticmethod
    def load_hparams(dir_model: Path):
        with open(dir_model / "config.json", "r", encoding="utf-8") as f:
            return json.load(f)

    @classmethod
    def register(cls, *names: str) -> Callable[[AnyModel], AnyModel]:
        assert names

        def func(modelcls: AnyModel) -> AnyModel:
            for name in names:
                cls._model_classes[name] = modelcls
            return modelcls
        return func
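
    # Registration ties a converter class to the `architectures` entry of a
    # model's config.json. For example (as done for the llama converter later
    # in this file):
    #
    #   @Model.register("LlamaForCausalLM")
    #   class LlamaModel(Model):
    #       model_arch = gguf.MODEL_ARCH.LLAMA
    #
    # `from_model_architecture` (below) then resolves the class by that name.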

    @classmethod
    def from_model_architecture(cls, arch: str) -> type[Model]:
        try:
            return cls._model_classes[arch]
        except KeyError:
            raise NotImplementedError(f'Architecture {arch!r} not supported!') from None

    def does_token_look_special(self, token: str | bytes) -> bool:
        if isinstance(token, (bytes, bytearray)):
            token_text = token.decode(encoding="utf-8")
        elif isinstance(token, memoryview):
            token_text = token.tobytes().decode(encoding="utf-8")
        else:
            token_text = token

        # Some models mark some added tokens which ought to be control tokens as not special.
        # (e.g. command-r, command-r-plus, deepseek-coder, gemma{,-2})
        seems_special = token_text in (
            "<pad>",  # deepseek-coder
            "<mask>", "<2mass>", "[@BOS@]",  # gemma{,-2}
        )

        seems_special = seems_special or (token_text.startswith("<|") and token_text.endswith("|>"))
        seems_special = seems_special or (token_text.startswith("<｜") and token_text.endswith("｜>"))  # deepseek-coder

        # TODO: should these be marked as UNUSED instead? (maybe not)
        seems_special = seems_special or (token_text.startswith("<unused") and token_text.endswith(">"))  # gemma{,-2}

        return seems_special

    # used for GPT-2 BPE and WordPiece vocabs
    def get_vocab_base(self) -> tuple[list[str], list[int], str]:
        tokens: list[str] = []
        toktypes: list[int] = []

        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(self.dir_model)
        vocab_size = self.hparams.get("vocab_size", len(tokenizer.vocab))
        assert max(tokenizer.vocab.values()) < vocab_size

        tokpre = self.get_vocab_base_pre(tokenizer)

        reverse_vocab = {id_: encoded_tok for encoded_tok, id_ in tokenizer.vocab.items()}
        added_vocab = tokenizer.get_added_vocab()

        for i in range(vocab_size):
            if i not in reverse_vocab:
                tokens.append(f"[PAD{i}]")
                toktypes.append(gguf.TokenType.UNUSED)
            else:
                token: str = reverse_vocab[i]
                if token in added_vocab:
                    if tokenizer.added_tokens_decoder[i].special or self.does_token_look_special(token):
                        toktypes.append(gguf.TokenType.CONTROL)
                    else:
                        token = token.replace(b"\xe2\x96\x81".decode("utf-8"), " ")  # pre-normalize user-defined spaces
                        toktypes.append(gguf.TokenType.USER_DEFINED)
                else:
                    toktypes.append(gguf.TokenType.NORMAL)
                tokens.append(token)

        return tokens, toktypes, tokpre

    # NOTE: this function is generated by convert_hf_to_gguf_update.py
    #       do not modify it manually!
    # ref:  https://github.com/ggerganov/llama.cpp/pull/6920
    # Marker: Start get_vocab_base_pre
    def get_vocab_base_pre(self, tokenizer) -> str:
        # encoding this string and hashing the resulting tokens would (hopefully) give us a unique identifier that
        # is specific for the BPE pre-tokenizer used by the model
        # we will use this unique identifier to write a "tokenizer.ggml.pre" entry in the GGUF file which we can
        # use in llama.cpp to implement the same pre-tokenizer

        chktxt = '\n \n\n \n\n\n \t \t\t \t\n  \n   \n    \n     \n🚀 (normal) 😶\u200d🌫️ (multiple emojis concatenated) ✅ 🦙🦙 3 33 333 3333 33333 333333 3333333 33333333 3.3 3..3 3...3 កាន់តែពិសេសអាច😁 ?我想在apple工作1314151天～ ------======= нещо на Български \'\'\'\'\'\'```````""""......!!!!!!?????? I\'ve been \'told he\'s there, \'RE you sure? \'M not sure I\'ll make it, \'D you like some tea? We\'Ve a\'lL'

        chktok = tokenizer.encode(chktxt)
        chkhsh = sha256(str(chktok).encode()).hexdigest()

        logger.debug(f"chktok: {chktok}")
        logger.debug(f"chkhsh: {chkhsh}")
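
        # The sha256 of the encoded token IDs acts as a fingerprint of the BPE
        # pre-tokenizer: any two tokenizers that split chktxt identically yield
        # the same chkhsh, so each hash below identifies a pre-tokenizer family
        # rather than a single model.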

        res = None

        # NOTE: if you get an error here, you need to update the convert_hf_to_gguf_update.py script
        #       or pull the latest version of the model from Huggingface
        #       don't edit the hashes manually!
        if chkhsh == "0ef9807a4087ebef797fc749390439009c3b9eda9ad1a097abbe738f486c01e5":
            # ref: https://huggingface.co/meta-llama/Meta-Llama-3-8B
            res = "llama-bpe"
        if chkhsh == "049ecf7629871e3041641907f3de7c733e4dbfdc736f57d882ba0b0845599754":
            # ref: https://huggingface.co/deepseek-ai/deepseek-llm-7b-base
            res = "deepseek-llm"
        if chkhsh == "347715f544604f9118bb75ed199f68779f423cabb20db6de6f31b908d04d7821":
            # ref: https://huggingface.co/deepseek-ai/deepseek-coder-6.7b-base
            res = "deepseek-coder"
        if chkhsh == "8aeee3860c56296a157a1fe2fad249ec40aa59b1bb5709f4ade11c4e6fe652ed":
            # ref: https://huggingface.co/tiiuae/falcon-7b
            res = "falcon"
        if chkhsh == "0876d13b50744004aa9aeae05e7b0647eac9d801b5ba4668afc01e709c15e19f":
            # ref: https://huggingface.co/BAAI/bge-small-en-v1.5
            res = "bert-bge"
        if chkhsh == "b6dc8df998e1cfbdc4eac8243701a65afe638679230920b50d6f17d81c098166":
            # ref: https://huggingface.co/mosaicml/mpt-7b
            res = "mpt"
        if chkhsh == "35d91631860c815f952d711435f48d356ebac988362536bed955d43bfa436e34":
            # ref: https://huggingface.co/bigcode/starcoder2-3b
            res = "starcoder"
        if chkhsh == "3ce83efda5659b07b1ad37ca97ca5797ea4285d9b9ab0dc679e4a720c9da7454":
            # ref: https://huggingface.co/openai-community/gpt2
            res = "gpt-2"
        if chkhsh == "32d85c31273f8019248f2559fed492d929ea28b17e51d81d3bb36fff23ca72b3":
            # ref: https://huggingface.co/stabilityai/stablelm-2-zephyr-1_6b
            res = "stablelm2"
        if chkhsh == "6221ad2852e85ce96f791f476e0b390cf9b474c9e3d1362f53a24a06dc8220ff":
            # ref: https://huggingface.co/smallcloudai/Refact-1_6-base
            res = "refact"
        if chkhsh == "9c2227e4dd922002fb81bde4fc02b0483ca4f12911410dee2255e4987644e3f8":
            # ref: https://huggingface.co/CohereForAI/c4ai-command-r-v01
            res = "command-r"
        if chkhsh == "e636dc30a262dcc0d8c323492e32ae2b70728f4df7dfe9737d9f920a282b8aea":
            # ref: https://huggingface.co/Qwen/Qwen1.5-7B
            res = "qwen2"
        if chkhsh == "b6dc8df998e1cfbdc4eac8243701a65afe638679230920b50d6f17d81c098166":
            # ref: https://huggingface.co/allenai/OLMo-1.7-7B-hf
            res = "olmo"
        if chkhsh == "a8594e3edff7c29c003940395316294b2c623e09894deebbc65f33f1515df79e":
            # ref: https://huggingface.co/databricks/dbrx-base
            res = "dbrx"
        if chkhsh == "0876d13b50744004aa9aeae05e7b0647eac9d801b5ba4668afc01e709c15e19f":
            # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-en
            res = "jina-v2-en"
        if chkhsh == "171aeeedd6fb548d418a7461d053f11b6f1f1fc9b387bd66640d28a4b9f5c643":
            # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-es
            res = "jina-v2-es"
        if chkhsh == "27949a2493fc4a9f53f5b9b029c82689cfbe5d3a1929bb25e043089e28466de6":
            # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-de
            res = "jina-v2-de"
        if chkhsh == "c136ed14d01c2745d4f60a9596ae66800e2b61fa45643e72436041855ad4089d":
            # ref: https://huggingface.co/abacusai/Smaug-Llama-3-70B-Instruct
            res = "smaug-bpe"
        if chkhsh == "c7ea5862a53e4272c035c8238367063e2b270d51faa48c0f09e9d5b54746c360":
            # ref: https://huggingface.co/LumiOpen/Poro-34B-chat
            res = "poro-chat"
        if chkhsh == "7967bfa498ade6b757b064f31e964dddbb80f8f9a4d68d4ba7998fcf281c531a":
            # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-code
            res = "jina-v2-code"
        if chkhsh == "b6e8e1518dc4305be2fe39c313ed643381c4da5db34a98f6a04c093f8afbe99b":
            # ref: https://huggingface.co/THUDM/glm-4-9b-chat
            res = "chatglm-bpe"
        if chkhsh == "7fc505bd3104ca1083b150b17d088b59534ede9bde81f0dd2090967d7fe52cee":
            # ref: https://huggingface.co/LumiOpen/Viking-7B
            res = "viking"
        if chkhsh == "b53802fb28e26d645c3a310b34bfe07da813026ec7c7716883404d5e0f8b1901":
            # ref: https://huggingface.co/core42/jais-13b
            res = "jais"
        if chkhsh == "63b97e4253352e6f357cc59ea5b583e3a680eaeaf2632188c2b952de2588485e":
            # ref: https://huggingface.co/mistralai/Mistral-Nemo-Base-2407
            res = "tekken"

        if res is None:
            logger.warning("\n")
            logger.warning("**************************************************************************************")
            logger.warning("** WARNING: The BPE pre-tokenizer was not recognized!")
            logger.warning("**          There are 2 possible reasons for this:")
            logger.warning("**          - the model has not been added to convert_hf_to_gguf_update.py yet")
            logger.warning("**          - the pre-tokenization config has changed upstream")
            logger.warning("**          Check your model files and convert_hf_to_gguf_update.py and update them accordingly.")
            logger.warning("** ref:     https://github.com/ggerganov/llama.cpp/pull/6920")
            logger.warning("**")
            logger.warning(f"** chkhsh:  {chkhsh}")
            logger.warning("**************************************************************************************")
            logger.warning("\n")
            raise NotImplementedError("BPE pre-tokenizer was not recognized - update get_vocab_base_pre()")

        logger.debug(f"tokenizer.ggml.pre: {repr(res)}")
        logger.debug(f"chkhsh: {chkhsh}")

        return res
    # Marker: End get_vocab_base_pre

    def _set_vocab_gpt2(self) -> None:
        tokens, toktypes, tokpre = self.get_vocab_base()
        self.gguf_writer.add_tokenizer_model("gpt2")
        self.gguf_writer.add_tokenizer_pre(tokpre)
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=True)
        special_vocab.add_to_gguf(self.gguf_writer)

    def _set_vocab_qwen(self):
        dir_model = self.dir_model
        hparams = self.hparams
        tokens: list[str] = []
        toktypes: list[int] = []

        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(dir_model, trust_remote_code=True)
        vocab_size = hparams["vocab_size"]
        assert max(tokenizer.get_vocab().values()) < vocab_size

        tokpre = self.get_vocab_base_pre(tokenizer)

        merges = []
        vocab = {}
        mergeable_ranks = tokenizer.mergeable_ranks
        for token, rank in mergeable_ranks.items():
            vocab[QwenModel.token_bytes_to_string(token)] = rank
            if len(token) == 1:
                continue
            merged = QwenModel.bpe(mergeable_ranks, token, max_rank=rank)
            assert len(merged) == 2
            merges.append(' '.join(map(QwenModel.token_bytes_to_string, merged)))

        # for this kind of tokenizer, added_vocab is not a subset of vocab, so they need to be combined
        added_vocab = tokenizer.special_tokens
        reverse_vocab = {id_: encoded_tok for encoded_tok, id_ in {**vocab, **added_vocab}.items()}

        for i in range(vocab_size):
            if i not in reverse_vocab:
                tokens.append(f"[PAD{i}]")
                toktypes.append(gguf.TokenType.UNUSED)
            elif reverse_vocab[i] in added_vocab:
                tokens.append(reverse_vocab[i])
                toktypes.append(gguf.TokenType.CONTROL)
            else:
                tokens.append(reverse_vocab[i])
                toktypes.append(gguf.TokenType.NORMAL)

        self.gguf_writer.add_tokenizer_model("gpt2")
        self.gguf_writer.add_tokenizer_pre(tokpre)
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(dir_model, load_merges=False)
        special_vocab.merges = merges
        # only add special tokens when they were not already loaded from config.json
        if len(special_vocab.special_token_ids) == 0:
            special_vocab._set_special_token("bos", tokenizer.special_tokens["<|endoftext|>"])
            special_vocab._set_special_token("eos", tokenizer.special_tokens["<|endoftext|>"])
        # this one is usually not in config.json anyway
        special_vocab._set_special_token("unk", tokenizer.special_tokens["<|endoftext|>"])
        special_vocab.add_to_gguf(self.gguf_writer)

    def _set_vocab_sentencepiece(self, add_to_gguf=True):
        tokens, scores, toktypes = self._create_vocab_sentencepiece()

        self.gguf_writer.add_tokenizer_model("llama")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

    def _create_vocab_sentencepiece(self):
        from sentencepiece import SentencePieceProcessor

        tokenizer_path = self.dir_model / 'tokenizer.model'

        if not tokenizer_path.is_file():
            raise FileNotFoundError(f"File not found: {tokenizer_path}")

        tokenizer = SentencePieceProcessor()
        tokenizer.LoadFromFile(str(tokenizer_path))

        vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())

        tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
        scores: list[float] = [-10000.0] * vocab_size
        toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size

        for token_id in range(tokenizer.vocab_size()):
            piece = tokenizer.IdToPiece(token_id)
            text = piece.encode("utf-8")
            score = tokenizer.GetScore(token_id)

            toktype = SentencePieceTokenTypes.NORMAL
            if tokenizer.IsUnknown(token_id):
                toktype = SentencePieceTokenTypes.UNKNOWN
            elif tokenizer.IsControl(token_id):
                toktype = SentencePieceTokenTypes.CONTROL
            elif tokenizer.IsUnused(token_id):
                toktype = SentencePieceTokenTypes.UNUSED
            elif tokenizer.IsByte(token_id):
                toktype = SentencePieceTokenTypes.BYTE

            tokens[token_id] = text
            scores[token_id] = score
            toktypes[token_id] = toktype

        added_tokens_file = self.dir_model / 'added_tokens.json'
        if added_tokens_file.is_file():
            with open(added_tokens_file, "r", encoding="utf-8") as f:
                added_tokens_json = json.load(f)
                for key in added_tokens_json:
                    token_id = added_tokens_json[key]
                    if token_id >= vocab_size:
                        logger.warning(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
                        continue

                    tokens[token_id] = key.encode("utf-8")
                    scores[token_id] = -1000.0
                    toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED

        tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
        if tokenizer_config_file.is_file():
            with open(tokenizer_config_file, "r", encoding="utf-8") as f:
                tokenizer_config_json = json.load(f)
                added_tokens_decoder = tokenizer_config_json.get("added_tokens_decoder", {})
                for token_id, token_data in added_tokens_decoder.items():
                    token_id = int(token_id)
                    token: str = token_data["content"]
                    if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
                        if tokens[token_id] != token.encode("utf-8"):
                            logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token!r}')
                    if token_data.get("special") or self.does_token_look_special(token):
                        toktypes[token_id] = SentencePieceTokenTypes.CONTROL
                    else:
                        token = token.replace(b"\xe2\x96\x81".decode("utf-8"), " ")  # pre-normalize user-defined spaces
                        toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED

                    scores[token_id] = -1000.0
                    tokens[token_id] = token.encode("utf-8")

        if vocab_size > len(tokens):
            pad_count = vocab_size - len(tokens)
            logger.debug(f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]")
            for i in range(1, pad_count + 1):
                tokens.append(bytes(f"[PAD{i}]", encoding="utf-8"))
                scores.append(-1000.0)
                toktypes.append(SentencePieceTokenTypes.UNUSED)

        return tokens, scores, toktypes
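
    # Note: -10000.0 above is the placeholder score for unassigned [PADi] slots;
    # tokens injected from added_tokens.json or added_tokens_decoder get a fixed
    # score of -1000.0 since they have no trained SentencePiece score.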

    def _set_vocab_llama_hf(self):
        vocab = gguf.LlamaHfVocab(self.dir_model)
        tokens = []
        scores = []
        toktypes = []

        for text, score, toktype in vocab.all_tokens():
            tokens.append(text)
            scores.append(score)
            toktypes.append(toktype)

        assert len(tokens) == vocab.vocab_size

        self.gguf_writer.add_tokenizer_model("llama")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

    def _set_vocab_builtin(self, model_name: Literal["gpt-neox", "llama-spm"], vocab_size: int):
        tokenizer_path = Path(sys.path[0]) / "models" / f"ggml-vocab-{model_name}.gguf"
        logger.warning(f"Using tokenizer from '{os.path.relpath(tokenizer_path, os.getcwd())}'")
        vocab_reader = gguf.GGUFReader(tokenizer_path, "r")

        default_pre = "mpt" if model_name == "gpt-neox" else "default"

        field = vocab_reader.get_field(gguf.Keys.Tokenizer.MODEL)
        assert field  # tokenizer model
        self.gguf_writer.add_tokenizer_model(bytes(field.parts[-1]).decode("utf-8"))

        field = vocab_reader.get_field(gguf.Keys.Tokenizer.PRE)
        self.gguf_writer.add_tokenizer_pre(bytes(field.parts[-1]).decode("utf-8") if field else default_pre)

        field = vocab_reader.get_field(gguf.Keys.Tokenizer.LIST)
        assert field  # token list
        self.gguf_writer.add_token_list([bytes(field.parts[i]) for i in field.data][:vocab_size])

        if model_name == "llama-spm":
            field = vocab_reader.get_field(gguf.Keys.Tokenizer.SCORES)
            assert field  # token scores
            self.gguf_writer.add_token_scores([field.parts[i].tolist()[0] for i in field.data][:vocab_size])

        field = vocab_reader.get_field(gguf.Keys.Tokenizer.TOKEN_TYPE)
        assert field  # token types
        self.gguf_writer.add_token_types([field.parts[i].tolist()[0] for i in field.data][:vocab_size])

        if model_name != "llama-spm":
            field = vocab_reader.get_field(gguf.Keys.Tokenizer.MERGES)
            assert field  # token merges
            self.gguf_writer.add_token_merges([bytes(field.parts[i]) for i in field.data])

        if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.BOS_ID)) is not None:
            self.gguf_writer.add_bos_token_id(field.parts[-1].tolist()[0])
        if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.EOS_ID)) is not None:
            self.gguf_writer.add_eos_token_id(field.parts[-1].tolist()[0])
        if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.UNK_ID)) is not None:
            self.gguf_writer.add_unk_token_id(field.parts[-1].tolist()[0])
        if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.PAD_ID)) is not None:
            self.gguf_writer.add_pad_token_id(field.parts[-1].tolist()[0])
        if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.ADD_BOS)) is not None:
            self.gguf_writer.add_add_bos_token(field.parts[-1].tolist()[0])
        if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.ADD_EOS)) is not None:
            self.gguf_writer.add_add_eos_token(field.parts[-1].tolist()[0])
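
# What follows are the per-architecture converters: each subclasses `Model`,
# registers the Hugging Face architecture name(s) it handles, sets `model_arch`,
# and overrides set_vocab / set_gguf_parameters / modify_tensors as needed.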

@Model.register("GPTNeoXForCausalLM")
class GPTNeoXModel(Model):
    model_arch = gguf.MODEL_ARCH.GPTNEOX

    def set_gguf_parameters(self):
        block_count = self.hparams["num_hidden_layers"]

        self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"])
        self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
        self.gguf_writer.add_rope_dimension_count(
            int(self.hparams["rotary_pct"] * (self.hparams["hidden_size"] // self.hparams["num_attention_heads"])),
        )
        self.gguf_writer.add_head_count(self.hparams["num_attention_heads"])
        self.gguf_writer.add_parallel_residual(self.hparams.get("use_parallel_residual", True))
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_eps"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads"))
        n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed"))

        tensors: list[tuple[str, Tensor]] = []

        if re.match(r"gpt_neox\.layers\.\d+\.attention\.query_key_value\.weight", name):
            # Map bloom-style qkv_linear to gpt-style qkv_linear
            # bloom: https://github.com/huggingface/transformers/blob/main/src/transformers/models/bloom/modeling_bloom.py#L238-L252  # noqa
            # gpt-2: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt2/modeling_gpt2.py#L312  # noqa
            qkv_weights = data_torch.reshape((n_head, 3, n_embed // n_head, n_embed))
            data_torch = torch.cat(
                (
                    qkv_weights[:, 0, :, :].reshape((-1, n_embed)),
                    qkv_weights[:, 1, :, :].reshape((-1, n_embed)),
                    qkv_weights[:, 2, :, :].reshape((-1, n_embed)),
                ),
                dim=0,
            )
            logger.info("re-format attention.linear_qkv.weight")
        elif re.match(r"gpt_neox\.layers\.\d+\.attention\.query_key_value\.bias", name):
            qkv_bias = data_torch.reshape((n_head, 3, n_embed // n_head))
            data_torch = torch.cat(
                (
                    qkv_bias[:, 0, :].reshape((n_embed,)),
                    qkv_bias[:, 1, :].reshape((n_embed,)),
                    qkv_bias[:, 2, :].reshape((n_embed,)),
                ),
                dim=0,
            )
            logger.info("re-format attention.linear_qkv.bias")

        tensors.append((self.map_tensor_name(name), data_torch))

        return tensors
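
    # The reshape above splits the fused QKV weight out of its per-head
    # interleaved layout [n_head, (q|k|v), head_dim, n_embed] and re-concatenates
    # it as three contiguous blocks (all Q rows, then all K, then all V), the
    # "gpt-style" layout referenced in the comment. BloomModel below applies the
    # same transformation.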
  740. @Model.register("BloomForCausalLM")
  741. class BloomModel(Model):
  742. model_arch = gguf.MODEL_ARCH.BLOOM
  743. def set_gguf_parameters(self):
  744. n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed"))
  745. n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads"))
  746. self.gguf_writer.add_context_length(self.hparams.get("seq_length", n_embed))
  747. self.gguf_writer.add_embedding_length(n_embed)
  748. self.gguf_writer.add_feed_forward_length(4 * n_embed)
  749. self.gguf_writer.add_block_count(self.hparams["n_layer"])
  750. self.gguf_writer.add_head_count(n_head)
  751. self.gguf_writer.add_head_count_kv(n_head)
  752. self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
  753. self.gguf_writer.add_file_type(self.ftype)
  754. def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
  755. del bid # unused
  756. n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads"))
  757. n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed"))
  758. name = re.sub(r'transformer\.', '', name)
  759. tensors: list[tuple[str, Tensor]] = []
  760. if re.match(r"h\.\d+\.self_attention\.query_key_value\.weight", name):
  761. # Map bloom-style qkv_linear to gpt-style qkv_linear
  762. # bloom: https://github.com/huggingface/transformers/blob/main/src/transformers/models/bloom/modeling_bloom.py#L238-L252 # noqa
  763. # gpt-2: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt2/modeling_gpt2.py#L312 # noqa
  764. qkv_weights = data_torch.reshape((n_head, 3, n_embed // n_head, n_embed))
  765. data_torch = torch.cat(
  766. (
  767. qkv_weights[:, 0, :, :].reshape((-1, n_embed)),
  768. qkv_weights[:, 1, :, :].reshape((-1, n_embed)),
  769. qkv_weights[:, 2, :, :].reshape((-1, n_embed)),
  770. ),
  771. dim=0,
  772. )
  773. logger.info("re-format attention.linear_qkv.weight")
  774. elif re.match(r"h\.\d+\.self_attention\.query_key_value\.bias", name):
  775. qkv_bias = data_torch.reshape((n_head, 3, n_embed // n_head))
  776. data_torch = torch.cat(
  777. (
  778. qkv_bias[:, 0, :].reshape((n_embed,)),
  779. qkv_bias[:, 1, :].reshape((n_embed,)),
  780. qkv_bias[:, 2, :].reshape((n_embed,)),
  781. ),
  782. dim=0,
  783. )
  784. logger.info("re-format attention.linear_qkv.bias")
  785. tensors.append((self.map_tensor_name(name), data_torch))
  786. if name == "word_embeddings.weight":
  787. assert self.tensor_names is not None
  788. # TODO: tie them at runtime, don't duplicate in the model file
  789. if all(s not in self.tensor_names for s in ("lm_head.weight", "output.weight")):
  790. tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT), data_torch))
  791. return tensors

@Model.register("MPTForCausalLM")
class MPTModel(Model):
    model_arch = gguf.MODEL_ARCH.MPT

    def set_vocab(self):
        try:
            self._set_vocab_gpt2()
        except Exception:
            # Fallback for SEA-LION model
            self._set_vocab_sentencepiece()
            self.gguf_writer.add_add_bos_token(False)
            self.gguf_writer.add_pad_token_id(3)
            self.gguf_writer.add_eos_token_id(1)
            self.gguf_writer.add_unk_token_id(0)

    def set_gguf_parameters(self):
        block_count = self.hparams["n_layers"]
        self.gguf_writer.add_context_length(self.hparams["max_seq_len"])
        self.gguf_writer.add_embedding_length(self.hparams["d_model"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_feed_forward_length(4 * self.hparams["d_model"])
        self.gguf_writer.add_head_count(self.hparams["n_heads"])
        if kv_n_heads := self.hparams["attn_config"].get("kv_n_heads"):
            self.gguf_writer.add_head_count_kv(kv_n_heads)
        self.gguf_writer.add_layer_norm_eps(1e-5)
        if self.hparams["attn_config"]["clip_qkv"] is not None:
            self.gguf_writer.add_clamp_kqv(self.hparams["attn_config"]["clip_qkv"])
        if self.hparams["attn_config"]["alibi"]:
            self.gguf_writer.add_max_alibi_bias(self.hparams["attn_config"]["alibi_bias_max"])
        else:
            self.gguf_writer.add_max_alibi_bias(0.0)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        if "scales" in name:
            new_name = self.map_tensor_name(name, try_suffixes=(".weight", ".bias", ".scales"))
            new_name = new_name.replace("scales", "act.scales")
        else:
            new_name = self.map_tensor_name(name, try_suffixes=(".weight", ".bias"))

        return [(new_name, data_torch)]

@Model.register("OrionForCausalLM")
class OrionModel(Model):
    model_arch = gguf.MODEL_ARCH.ORION

    def set_vocab(self):
        self._set_vocab_sentencepiece()

    def set_gguf_parameters(self):
        block_count = self.hparams["num_hidden_layers"]
        head_count = self.hparams["num_attention_heads"]
        head_count_kv = self.hparams.get("num_key_value_heads", head_count)

        ctx_length = 0
        if "max_sequence_length" in self.hparams:
            ctx_length = self.hparams["max_sequence_length"]
        elif "max_position_embeddings" in self.hparams:
            ctx_length = self.hparams["max_position_embeddings"]
        elif "model_max_length" in self.hparams:
            ctx_length = self.hparams["model_max_length"]
        else:
            raise ValueError("gguf: cannot find ctx length parameter.")

        self.gguf_writer.add_file_type(self.ftype)
        self.gguf_writer.add_tensor_data_layout("Meta AI original pth")
        self.gguf_writer.add_context_length(ctx_length)
        self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
        self.gguf_writer.add_head_count(head_count)
        self.gguf_writer.add_head_count_kv(head_count_kv)
        # note: the config provides rms_norm_eps, but the model actually uses layer norm
        # ref: https://huggingface.co/OrionStarAI/Orion-14B-Chat/blob/276a17221ce42beb45f66fac657a41540e71f4f5/modeling_orion.py#L570-L571
        self.gguf_writer.add_layer_norm_eps(self.hparams["rms_norm_eps"])

@Model.register("BaichuanForCausalLM", "BaiChuanForCausalLM")
class BaichuanModel(Model):
    model_arch = gguf.MODEL_ARCH.BAICHUAN

    def set_vocab(self):
        self._set_vocab_sentencepiece()

    def set_gguf_parameters(self):
        block_count = self.hparams["num_hidden_layers"]
        head_count = self.hparams["num_attention_heads"]
        head_count_kv = self.hparams.get("num_key_value_heads", head_count)

        ctx_length = 0
        if "max_sequence_length" in self.hparams:
            ctx_length = self.hparams["max_sequence_length"]
        elif "max_position_embeddings" in self.hparams:
            ctx_length = self.hparams["max_position_embeddings"]
        elif "model_max_length" in self.hparams:
            ctx_length = self.hparams["model_max_length"]
        else:
            raise ValueError("gguf: cannot find ctx length parameter.")

        self.gguf_writer.add_tensor_data_layout("Meta AI original pth")
        self.gguf_writer.add_context_length(ctx_length)
        self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
        self.gguf_writer.add_rope_dimension_count(self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
        self.gguf_writer.add_head_count(head_count)
        self.gguf_writer.add_head_count_kv(head_count_kv)
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
        self.gguf_writer.add_file_type(self.ftype)

        if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]:
            if self.hparams["rope_scaling"].get("type") == "linear":
                self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
                self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        head_count = self.hparams["num_attention_heads"]
        head_count_kv = self.hparams.get("num_key_value_heads", head_count)

        tensors: list[tuple[str, Tensor]] = []

        if bid is not None and name == f"model.layers.{bid}.self_attn.W_pack.weight":
            logger.info(f"Unpacking and permuting layer {bid}")
            tensors = [
                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid),
                 self._reverse_hf_permute_part(data_torch, 0, head_count, head_count)),
                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid),
                 self._reverse_hf_permute_part(data_torch, 1, head_count, head_count_kv)),
                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid),
                 self._reverse_hf_part(data_torch, 2)),
            ]
        else:
            tensors = [(self.map_tensor_name(name), data_torch)]

        return tensors

    def _reverse_hf_permute(self, weights: Tensor, n_head: int, n_kv_head: int | None = None) -> Tensor:
        if n_kv_head is not None and n_head != n_kv_head:
            n_head //= n_kv_head

        return (
            weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
            .swapaxes(1, 2)
            .reshape(weights.shape)
        )

    def _reverse_hf_permute_part(
        self, weights: Tensor, n_part: int, n_head: int, n_head_kv: int | None = None,
    ) -> Tensor:
        r = weights.shape[0] // 3
        return self._reverse_hf_permute(weights[r * n_part:r * n_part + r, ...], n_head, n_head_kv)

    def _reverse_hf_part(self, weights: Tensor, n_part: int) -> Tensor:
        r = weights.shape[0] // 3
        return weights[r * n_part:r * n_part + r, ...]
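
# Worked example (illustrative numbers, not taken from any specific checkpoint):
# for hidden_size = 4096, W_pack has shape (3 * 4096, 4096) and _reverse_hf_part
# slices it into thirds of r = 4096 rows: part 0 -> rows [0, 4096) (Q),
# part 1 -> rows [4096, 8192) (K), part 2 -> rows [8192, 12288) (V). Q and K
# additionally pass through _reverse_hf_permute, which undoes the rotary
# half-split interleave that HF's Llama-style export applies per head.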

@Model.register("XverseForCausalLM")
class XverseModel(Model):
    model_arch = gguf.MODEL_ARCH.XVERSE

    def set_vocab(self):
        assert (self.dir_model / "tokenizer.json").is_file()
        dir_model = self.dir_model
        hparams = self.hparams

        tokens: list[bytes] = []
        toktypes: list[int] = []

        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(dir_model)
        vocab_size = hparams.get("vocab_size", len(tokenizer.vocab))
        # Since we are checking the maximum index, we need to ensure it's strictly less than vocab_size,
        # because vocab_size is the count of items, and indexes start at 0.
        max_vocab_index = max(tokenizer.get_vocab().values())
        if max_vocab_index >= vocab_size:
            raise ValueError("Vocabulary size exceeds expected maximum size.")

        reverse_vocab: dict[int, str] = {id_: encoded_tok for encoded_tok, id_ in tokenizer.vocab.items()}
        added_vocab = tokenizer.get_added_vocab()

        for token_id in range(vocab_size):
            token_text = reverse_vocab[token_id].encode('utf-8')
            # replace "\x00" with a string of length > 0
            if token_text == b"\x00":
                toktype = gguf.TokenType.BYTE  # special
                token_text = f"<{token_text}>".encode('utf-8')
            elif re.fullmatch(br"<0x[0-9A-Fa-f]{2}>", token_text):
                toktype = gguf.TokenType.BYTE  # special
            elif reverse_vocab[token_id] in added_vocab:
                if tokenizer.added_tokens_decoder[token_id].special:
                    toktype = gguf.TokenType.CONTROL
                else:
                    toktype = gguf.TokenType.USER_DEFINED
            else:
                toktype = gguf.TokenType.NORMAL

            tokens.append(token_text)
            toktypes.append(toktype)

        self.gguf_writer.add_tokenizer_model("llama")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        block_count = self.hparams["num_hidden_layers"]
        head_count = self.hparams["num_attention_heads"]
        head_count_kv = self.hparams.get("num_key_value_heads", head_count)

        ctx_length = 0
        if "max_sequence_length" in self.hparams:
            ctx_length = self.hparams["max_sequence_length"]
        elif "max_position_embeddings" in self.hparams:
            ctx_length = self.hparams["max_position_embeddings"]
        elif "model_max_length" in self.hparams:
            ctx_length = self.hparams["model_max_length"]
        else:
            raise ValueError("gguf: cannot find ctx length parameter.")

        self.gguf_writer.add_tensor_data_layout("Meta AI original pth")
        self.gguf_writer.add_context_length(ctx_length)
        self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
        self.gguf_writer.add_rope_dimension_count(self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
        self.gguf_writer.add_head_count(head_count)
        self.gguf_writer.add_head_count_kv(head_count_kv)
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
        self.gguf_writer.add_file_type(self.ftype)

        if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]:
            if self.hparams["rope_scaling"].get("type") == "linear":
                self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
                self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        head_count = self.hparams["num_attention_heads"]
        head_count_kv = self.hparams.get("num_key_value_heads", head_count)

        # HF models permute some of the tensors, so we need to undo that
        if name.endswith("q_proj.weight"):
            data_torch = self._reverse_hf_permute(data_torch, head_count, head_count)
        if name.endswith("k_proj.weight"):
            data_torch = self._reverse_hf_permute(data_torch, head_count, head_count_kv)

        return [(self.map_tensor_name(name), data_torch)]

    def _reverse_hf_permute(self, weights: Tensor, n_head: int, n_kv_head: int | None = None) -> Tensor:
        if n_kv_head is not None and n_head != n_kv_head:
            n_head //= n_kv_head

        return (
            weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
            .swapaxes(1, 2)
            .reshape(weights.shape)
        )

@Model.register("FalconForCausalLM", "RWForCausalLM")
class FalconModel(Model):
    model_arch = gguf.MODEL_ARCH.FALCON

    def set_gguf_parameters(self):
        block_count = self.hparams.get("num_hidden_layers")
        if block_count is None:
            block_count = self.hparams["n_layer"]  # old name

        n_head = self.hparams.get("num_attention_heads")
        if n_head is None:
            n_head = self.hparams["n_head"]  # old name

        n_head_kv = self.hparams.get("num_kv_heads")
        if n_head_kv is None:
            n_head_kv = self.hparams.get("n_head_kv", 1)  # old name

        self.gguf_writer.add_context_length(2048)  # not in config.json
        self.gguf_writer.add_tensor_data_layout("jploski")  # qkv tensor transform
        self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
        self.gguf_writer.add_feed_forward_length(4 * self.hparams["hidden_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_head_count(n_head)
        self.gguf_writer.add_head_count_kv(n_head_kv)
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        # QKV tensor transform
        # The original query_key_value tensor contains n_head_kv "kv groups",
        # each consisting of n_head/n_head_kv query weights followed by one key
        # and one value weight (shared by all query heads in the kv group).
        # This layout makes it a big pain to work with in GGML.
        # So we rearrange them here, so that we have n_head query weights
        # followed by n_head_kv key weights followed by n_head_kv value weights,
        # in contiguous fashion.
        # ref: https://github.com/jploski/ggml/blob/falcon40b/examples/falcon/convert-hf-to-ggml.py

        if "query_key_value" in name:
            n_head = self.find_hparam(["num_attention_heads", "n_head"])
            n_head_kv = self.find_hparam(["num_kv_heads", "n_head_kv"], optional=True) or 1
            head_dim = self.hparams["hidden_size"] // n_head

            qkv = data_torch.view(n_head_kv, n_head // n_head_kv + 2, head_dim, head_dim * n_head)
            q = qkv[:, :-2].reshape(n_head * head_dim, head_dim * n_head)
            k = qkv[:, [-2]].reshape(n_head_kv * head_dim, head_dim * n_head)
            v = qkv[:, [-1]].reshape(n_head_kv * head_dim, head_dim * n_head)
            data_torch = torch.cat((q, k, v)).reshape_as(data_torch)

        return [(self.map_tensor_name(name), data_torch)]
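
# Illustrative sketch (hypothetical standalone helper mirroring the transform
# above): each Falcon kv group stores its query heads followed by its single
# key and value head; the converter flattens this into contiguous Q|K|V blocks.
def _demo_falcon_split_qkv(qkv: torch.Tensor, n_head: int, n_head_kv: int, head_dim: int):
    # (kv_group, queries_per_group + {k, v}, head_dim, hidden)
    grouped = qkv.view(n_head_kv, n_head // n_head_kv + 2, head_dim, head_dim * n_head)
    q = grouped[:, :-2].reshape(n_head * head_dim, head_dim * n_head)
    k = grouped[:, [-2]].reshape(n_head_kv * head_dim, head_dim * n_head)
    v = grouped[:, [-1]].reshape(n_head_kv * head_dim, head_dim * n_head)
    return q, k, v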

@Model.register("GPTBigCodeForCausalLM")
class StarCoderModel(Model):
    model_arch = gguf.MODEL_ARCH.STARCODER

    def set_gguf_parameters(self):
        block_count = self.hparams["n_layer"]

        self.gguf_writer.add_context_length(self.hparams["n_positions"])
        self.gguf_writer.add_embedding_length(self.hparams["n_embd"])
        self.gguf_writer.add_feed_forward_length(4 * self.hparams["n_embd"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_head_count(self.hparams["n_head"])
        self.gguf_writer.add_head_count_kv(1)
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)

@Model.register("GPTRefactForCausalLM")
class RefactModel(Model):
    model_arch = gguf.MODEL_ARCH.REFACT

    def set_vocab(self):
        super().set_vocab()

        # TODO: how to determine special FIM tokens automatically?
        special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=False,
                                          special_token_types=['prefix', 'suffix', 'middle', 'eot'])
        special_vocab._set_special_token("prefix", 1)
        special_vocab._set_special_token("suffix", 3)
        special_vocab._set_special_token("middle", 2)
        special_vocab.chat_template = None  # do not add it twice
        special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        hidden_dim = self.hparams["n_embd"]
        inner_dim = 4 * hidden_dim
        hidden_dim = int(2 * inner_dim / 3)
        multiple_of = 256
        ff_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of)

        block_count = self.hparams["n_layer"]

        # Refact uses ALiBi, so this context length comes from config.json
        # (n_positions) and reflects what the model was trained with.
        self.gguf_writer.add_context_length(self.hparams["n_positions"])
        self.gguf_writer.add_embedding_length(self.hparams["n_embd"])
        self.gguf_writer.add_feed_forward_length(ff_dim)
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_head_count(self.hparams["n_head"])
        self.gguf_writer.add_head_count_kv(1)
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        hidden_dim = self.hparams["n_embd"]
        inner_dim = 4 * hidden_dim
        hidden_dim = int(2 * inner_dim / 3)
        multiple_of = 256
        ff_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of)
        n_head = self.hparams["n_head"]
        n_head_kv = 1
        head_dim = self.hparams["n_embd"] // n_head

        tensors: list[tuple[str, Tensor]] = []

        if bid is not None:
            if name == f"transformer.h.{bid}.attn.kv.weight":
                tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid), data_torch[:n_head_kv * head_dim]))
                tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid), data_torch[n_head_kv * head_dim:]))
            elif name == f"transformer.h.{bid}.attn.q.weight":
                tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid), data_torch))
            elif name == f"transformer.h.{bid}.mlp.gate_up_proj.weight":
                tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE, bid), data_torch[:ff_dim]))
                tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP, bid), data_torch[ff_dim:]))

        if len(tensors) == 0:
            tensors.append((self.map_tensor_name(name), data_torch))

        return tensors
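
# Worked example (illustrative, with assumed n_embd = 4096): the LLaMA-style
# FFN sizing above gives inner_dim = 16384, hidden_dim = int(2 * 16384 / 3)
# = 10922, and ff_dim rounded up to a multiple of 256 -> 43 * 256 = 11008.
# That same ff_dim is the split point used when slicing the fused
# gate_up_proj weight into separate FFN_GATE and FFN_UP tensors.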

@Model.register("StableLmForCausalLM", "StableLMEpochForCausalLM", "LlavaStableLMEpochForCausalLM")
class StableLMModel(Model):
    model_arch = gguf.MODEL_ARCH.STABLELM

    def set_vocab(self):
        if (self.dir_model / "tokenizer.json").is_file():
            self._set_vocab_gpt2()
        else:
            # StableLM 2 1.6B used to have a vocab in a similar format to Qwen's vocab
            self._set_vocab_qwen()

    def set_gguf_parameters(self):
        hparams = self.hparams
        block_count = hparams["num_hidden_layers"]

        self.gguf_writer.add_context_length(hparams["max_position_embeddings"])
        self.gguf_writer.add_embedding_length(hparams["hidden_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
        rotary_factor = self.find_hparam(["partial_rotary_factor", "rope_pct"])
        self.gguf_writer.add_rope_dimension_count(int(rotary_factor * (hparams["hidden_size"] // hparams["num_attention_heads"])))
        self.gguf_writer.add_head_count(hparams["num_attention_heads"])
        self.gguf_writer.add_head_count_kv(hparams["num_key_value_heads"])
        self.gguf_writer.add_parallel_residual(hparams["use_parallel_residual"] if "use_parallel_residual" in hparams else True)
        self.gguf_writer.add_layer_norm_eps(self.find_hparam(["layer_norm_eps", "norm_eps"]))
        self.gguf_writer.add_file_type(self.ftype)

    _q_norms: list[dict[str, Tensor]] | None = None
    _k_norms: list[dict[str, Tensor]] | None = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        n_head = self.hparams["num_attention_heads"]
        n_kv_head = self.hparams["num_key_value_heads"]

        if name.find("q_layernorm.norms") != -1:
            assert bid is not None

            if self._q_norms is None:
                self._q_norms = [{} for _ in range(self.block_count)]

            self._q_norms[bid][name] = data_torch

            if len(self._q_norms[bid]) >= n_head:
                return self._stack_qk_norm(bid, n_head, self._q_norms[bid], "q_layernorm")
            else:
                return []

        if name.find("k_layernorm.norms") != -1:
            assert bid is not None

            if self._k_norms is None:
                self._k_norms = [{} for _ in range(self.block_count)]

            self._k_norms[bid][name] = data_torch

            if len(self._k_norms[bid]) >= n_kv_head:
                return self._stack_qk_norm(bid, n_kv_head, self._k_norms[bid], "k_layernorm")
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]

    def _stack_qk_norm(self, bid: int, n_head: int, norms: dict[str, Tensor], layer_name: str = "q_layernorm"):
        datas: list[Tensor] = []
        # extract the norms in order
        for xid in range(n_head):
            ename = f"model.layers.{bid}.self_attn.{layer_name}.norms.{xid}.weight"
            datas.append(norms[ename])
            del norms[ename]
        data_torch = torch.stack(datas, dim=0)

        merged_name = f"model.layers.{bid}.self_attn.{layer_name}.weight"
        new_name = self.map_tensor_name(merged_name)

        return [(new_name, data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._q_norms is not None or self._k_norms is not None:
            # flatten two `list[dict[str, Tensor]]` into a single `list[str]`
            norms = (
                [k for d in self._q_norms for k in d.keys()] if self._q_norms is not None else []
            ) + (
                [k for d in self._k_norms for k in d.keys()] if self._k_norms is not None else []
            )
            if len(norms) > 0:
                raise ValueError(f"Unprocessed norms: {norms}")
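
# Illustrative note: some StableLM checkpoints store one small layernorm weight
# per attention head (q_layernorm.norms.0 ... norms.{n_head-1}). Because tensors
# arrive one at a time, modify_tensors buffers them per block until all n_head
# (or n_kv_head) pieces are present, then _stack_qk_norm emits a single stacked
# tensor, e.g. torch.stack over n_head vectors of shape (head_dim,) yields one
# (n_head, head_dim) weight under the merged q_layernorm/k_layernorm name.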

@Model.register("LlamaForCausalLM", "MistralForCausalLM", "MixtralForCausalLM")
class LlamaModel(Model):
    model_arch = gguf.MODEL_ARCH.LLAMA

    def set_vocab(self):
        try:
            self._set_vocab_sentencepiece()
        except FileNotFoundError:
            try:
                self._set_vocab_llama_hf()
            except (FileNotFoundError, TypeError):
                # Llama 3
                self._set_vocab_gpt2()

        # Apply to CodeLlama only (and ignore for Llama 3 with a vocab size of 128256)
        if self.hparams.get("vocab_size", 32000) == 32016:
            special_vocab = gguf.SpecialVocab(
                self.dir_model, load_merges=False,
                special_token_types=['prefix', 'suffix', 'middle', 'eot']
            )
            special_vocab._set_special_token("prefix", 32007)
            special_vocab._set_special_token("suffix", 32008)
            special_vocab._set_special_token("middle", 32009)
            special_vocab._set_special_token("eot", 32010)
            special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        hparams = self.hparams
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])
        self.gguf_writer.add_rope_dimension_count(hparams["hidden_size"] // hparams["num_attention_heads"])

        if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]:
            if self.hparams["rope_scaling"].get("type") == "linear":
                self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
                self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"])

        tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
        if tokenizer_config_file.is_file():
            with open(tokenizer_config_file, "r", encoding="utf-8") as f:
                tokenizer_config_json = json.load(f)
                if "add_prefix_space" in tokenizer_config_json:
                    self.gguf_writer.add_add_space_prefix(tokenizer_config_json["add_prefix_space"])

        # Apply to granite small models only
        if self.hparams.get("vocab_size", 32000) == 49152:
            self.gguf_writer.add_add_bos_token(False)

    @staticmethod
    def permute(weights: Tensor, n_head: int, n_head_kv: int | None):
        if n_head_kv is not None and n_head != n_head_kv:
            n_head = n_head_kv
        return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
                .swapaxes(1, 2)
                .reshape(weights.shape))

    _experts: list[dict[str, Tensor]] | None = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        n_head = self.hparams["num_attention_heads"]
        n_kv_head = self.hparams.get("num_key_value_heads")

        if name.endswith(("q_proj.weight", "q_proj.bias")):
            data_torch = LlamaModel.permute(data_torch, n_head, n_head)
        if name.endswith(("k_proj.weight", "k_proj.bias")):
            data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)

        # process the experts separately
        if name.find("block_sparse_moe.experts") != -1:
            n_experts = self.hparams["num_local_experts"]

            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for wid in ["w1", "w2", "w3"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.{wid}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)

                    merged_name = f"layers.{bid}.feed_forward.experts.{wid}.weight"

                    new_name = self.map_tensor_name(merged_name)

                    tensors.append((new_name, data_torch))
                return tensors
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")
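
# Illustrative note on the Mixtral-style expert merge above: per block, the
# n_experts separate 2D weights for each of w1/w2/w3 (each of shape
# (n_ff, n_embd) in pytorch) are buffered until complete, then stacked with
# torch.stack(datas, dim=0) into one (n_experts, n_ff, n_embd) tensor. Since
# ggml lists dimensions in reverse order, that lands as
# ne = {n_embd, n_ff, n_experts} on the llama.cpp side, which is the layout
# its MoE graph expects for the *_EXPS tensors.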

@Model.register("BitnetForCausalLM")
class BitnetModel(Model):
    model_arch = gguf.MODEL_ARCH.BITNET

    def set_vocab(self):
        self._set_vocab_sentencepiece()

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
        self.gguf_writer.add_rope_scaling_factor(1.0)

    def weight_quant(self, weight):
        dtype = weight.dtype
        weight = weight.float()
        s = 1 / weight.abs().mean().clamp(min=1e-5)
        weight = (weight * s).round().clamp(-1, 1) / s
        scale = weight.abs().max().unsqueeze(0)
        weight = torch.where(weight.abs().less(1e-6), 0, weight).type(dtype)
        weight = torch.sign(weight).type(dtype)
        return weight.type(dtype), scale.type(torch.float32)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        new_name = self.map_tensor_name(name)

        if any(self.match_model_tensor_name(new_name, key, bid) for key in [
            gguf.MODEL_TENSOR.ATTN_Q,
            gguf.MODEL_TENSOR.ATTN_K,
            gguf.MODEL_TENSOR.ATTN_V,
            gguf.MODEL_TENSOR.ATTN_OUT,
            gguf.MODEL_TENSOR.FFN_UP,
            gguf.MODEL_TENSOR.FFN_DOWN,
            gguf.MODEL_TENSOR.FFN_GATE,
        ]):
            # transform weight into 1/0/-1 (in fp32)
            weight_torch, scale_torch = self.weight_quant(data_torch)
            yield (new_name, weight_torch)
            yield (new_name.removesuffix(".weight") + ".scale", scale_torch)
        else:
            yield (new_name, data_torch)
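
# Worked example (illustrative values): for w = [0.3, -0.8, 0.05] in
# weight_quant, mean |w| ~ 0.383 so s ~ 2.61; round(w * s).clamp(-1, 1) gives
# [1, -1, 0], dividing by s brings that back to [~0.383, ~-0.383, 0], so the
# per-tensor scale (max abs) is ~0.383, and torch.sign leaves the ternary
# {-1, 0, 1} pattern that is emitted alongside a single fp32 ".scale" tensor.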

@Model.register("GrokForCausalLM")
class GrokModel(Model):
    model_arch = gguf.MODEL_ARCH.GROK

    def set_vocab(self):
        self._set_vocab_sentencepiece()

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def set_gguf_parameters(self):
        super().set_gguf_parameters()

    _experts: list[dict[str, Tensor]] | None = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # process the experts separately
        if name.find(".moe.") != -1:
            n_experts = self.hparams["num_local_experts"]

            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for wid in ["linear", "linear_1", "linear_v"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"transformer.decoder_layer.{bid}.moe.{xid}.{wid}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)

                    merged_name = f"transformer.decoder_layer.{bid}.moe.{wid}.weight"

                    new_name = self.map_tensor_name(merged_name)

                    tensors.append((new_name, data_torch))
                return tensors
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]

@Model.register("DbrxForCausalLM")
class DbrxModel(Model):
    model_arch = gguf.MODEL_ARCH.DBRX

    def set_gguf_parameters(self):
        ffn_config = self.hparams["ffn_config"]
        attn_config = self.hparams["attn_config"]
        self.gguf_writer.add_block_count(self.hparams["n_layers"])

        self.gguf_writer.add_context_length(self.hparams["max_seq_len"])
        self.gguf_writer.add_embedding_length(self.hparams["d_model"])
        self.gguf_writer.add_feed_forward_length(ffn_config["ffn_hidden_size"])

        self.gguf_writer.add_head_count(self.hparams["n_heads"])
        self.gguf_writer.add_head_count_kv(attn_config["kv_n_heads"])

        self.gguf_writer.add_rope_freq_base(attn_config["rope_theta"])

        self.gguf_writer.add_clamp_kqv(attn_config["clip_qkv"])

        self.gguf_writer.add_expert_count(ffn_config["moe_num_experts"])
        self.gguf_writer.add_expert_used_count(ffn_config["moe_top_k"])

        self.gguf_writer.add_layer_norm_eps(1e-5)

        self.gguf_writer.add_file_type(self.ftype)
        logger.info(f"gguf: file type = {self.ftype}")

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        n_expert = self.hparams["ffn_config"]["moe_num_experts"]
        n_ff = self.hparams["ffn_config"]["ffn_hidden_size"]
        n_embd = self.hparams["d_model"]

        # Specific behavior for experts tensors: suffix with .weight, view as 3D and transpose.
        # The original implementation expects (n_expert, n_ff, n_embd) for all experts weights,
        # but the llama.cpp MoE graph works differently,
        # AND the dimensions in ggml are typically in the reverse order of the pytorch dimensions,
        # so (n_expert, n_ff, n_embd) in pytorch is {n_embd, n_ff, n_expert} in ggml_tensor
        exp_tensor_names = {"ffn.experts.mlp.w1": None,       # LLM_TENSOR_FFN_GATE_EXPS ggml_tensor->ne{n_embd, n_ff, n_expert}
                            "ffn.experts.mlp.w2": (0, 2, 1),  # LLM_TENSOR_FFN_DOWN_EXPS ggml_tensor->ne{n_ff, n_embd, n_expert}
                            "ffn.experts.mlp.v1": None}       # LLM_TENSOR_FFN_UP_EXPS   ggml_tensor->ne{n_embd, n_ff, n_expert}
        experts = False

        for exp_tensor_name in exp_tensor_names.keys():
            if name.find(exp_tensor_name) != -1 and name.find(".weight") == -1:
                experts = True
                data_torch = data_torch.view(n_expert, n_ff, n_embd)
                if (permute_tensor := exp_tensor_names[exp_tensor_name]) is not None:
                    data_torch = data_torch.permute(*permute_tensor)
                break

        # map tensor names
        # In MoE models the ffn tensors are typically most of the model weights,
        # and need to be quantizable. Quantize expects tensor names to be suffixed by .weight.
        # Every other model has its weight names ending in .weight; dbrx does not follow
        # that convention, so the suffix is appended here:
        # https://huggingface.co/databricks/dbrx-instruct/blob/main/model.safetensors.index.json#L15
        new_name = self.map_tensor_name(name if not experts else name + ".weight", try_suffixes=(".weight",))

        return [(new_name, data_torch)]

    def extra_f16_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool:
        del name, new_name, bid  # unused

        return n_dims > 1
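
# Worked example (illustrative, using the published dbrx-instruct config):
# with moe_num_experts = 16, ffn_hidden_size = 10752 and d_model = 6144, each
# flat expert tensor is viewed as (16, 10752, 6144); w2 is additionally
# permuted to (16, 6144, 10752) because FFN_DOWN multiplies in the opposite
# direction. Appending ".weight" lets the quantizer treat these fused expert
# tensors like any other weight tensor.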

@Model.register("MiniCPMForCausalLM")
class MiniCPMModel(Model):
    model_arch = gguf.MODEL_ARCH.MINICPM

    def set_gguf_parameters(self):
        block_count = self.hparams["num_hidden_layers"]
        self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"])
        self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
        self.gguf_writer.add_rope_dimension_count(self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
        self.gguf_writer.add_head_count(self.hparams["num_attention_heads"])
        self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"])
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
        self.gguf_writer.add_file_type(self.ftype)

    def set_vocab(self):
        self._set_vocab_llama_hf()

    def _reverse_hf_permute(self, weights: Tensor, n_head: int, n_kv_head: int | None = None) -> Tensor:
        if n_kv_head is not None and n_head != n_kv_head:
            n_head //= n_kv_head

        return (
            weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
            .swapaxes(1, 2)
            .reshape(weights.shape)
        )

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        n_head = self.hparams["num_attention_heads"]
        n_kv_head = self.hparams.get("num_key_value_heads")

        # HF models permute some of the tensors, so we need to undo that
        if name.endswith("q_proj.weight"):
            data_torch = self._reverse_hf_permute(data_torch, n_head, n_head)
        if name.endswith("k_proj.weight"):
            data_torch = self._reverse_hf_permute(data_torch, n_head, n_kv_head)

        return [(self.map_tensor_name(name), data_torch)]

@Model.register("QWenLMHeadModel")
class QwenModel(Model):
    model_arch = gguf.MODEL_ARCH.QWEN

    @staticmethod
    def token_bytes_to_string(b):
        from transformers.models.gpt2.tokenization_gpt2 import bytes_to_unicode
        byte_encoder = bytes_to_unicode()
        return ''.join([byte_encoder[ord(char)] for char in b.decode('latin-1')])

    @staticmethod
    def bpe(mergeable_ranks: dict[bytes, int], token: bytes, max_rank: int | None = None) -> list[bytes]:
        parts = [bytes([b]) for b in token]
        while True:
            min_idx = None
            min_rank = None
            for i, pair in enumerate(zip(parts[:-1], parts[1:])):
                rank = mergeable_ranks.get(pair[0] + pair[1])
                if rank is not None and (min_rank is None or rank < min_rank):
                    min_idx = i
                    min_rank = rank
            if min_rank is None or (max_rank is not None and min_rank >= max_rank):
                break
            assert min_idx is not None
            parts = parts[:min_idx] + [parts[min_idx] + parts[min_idx + 1]] + parts[min_idx + 2:]
        return parts

    def set_vocab(self):
        self._set_vocab_qwen()

    def set_gguf_parameters(self):
        self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"])
        self.gguf_writer.add_block_count(self.hparams["num_hidden_layers"])
        self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
        self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
        self.gguf_writer.add_rope_freq_base(self.hparams["rotary_emb_base"])
        self.gguf_writer.add_rope_dimension_count(self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
        self.gguf_writer.add_head_count(self.hparams["num_attention_heads"])
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)
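
# Illustrative example (hypothetical ranks): QwenModel.bpe greedily merges the
# lowest-ranked adjacent byte pair until no merge applies. With
# mergeable_ranks = {b"ab": 0, b"abc": 1}, the token b"abc" resolves as
# [b"a", b"b", b"c"] -> [b"ab", b"c"] -> [b"abc"]; passing max_rank=1 stops
# before the second merge and yields [b"ab", b"c"], which is how merge pairs
# are recovered when exporting a tiktoken-style vocab to GGUF.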

@Model.register("Qwen2ForCausalLM")
class Qwen2Model(Model):
    model_arch = gguf.MODEL_ARCH.QWEN2

    def set_vocab(self):
        try:
            self._set_vocab_sentencepiece()
        except FileNotFoundError:
            self._set_vocab_gpt2()

@Model.register("Qwen2MoeForCausalLM")
class Qwen2MoeModel(Model):
    model_arch = gguf.MODEL_ARCH.QWEN2MOE

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        if (n_experts := self.hparams.get("num_experts")) is not None:
            self.gguf_writer.add_expert_count(n_experts)
        if (moe_intermediate_size := self.hparams.get("moe_intermediate_size")) is not None:
            self.gguf_writer.add_expert_feed_forward_length(moe_intermediate_size)
            logger.info(f"gguf: expert feed forward length = {moe_intermediate_size}")
        if (shared_expert_intermediate_size := self.hparams.get('shared_expert_intermediate_size')) is not None:
            self.gguf_writer.add_expert_shared_feed_forward_length(shared_expert_intermediate_size)
            logger.info(f"gguf: expert shared feed forward length = {shared_expert_intermediate_size}")

    _experts: list[dict[str, Tensor]] | None = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # process the experts separately
        if name.find("experts") != -1:
            n_experts = self.hparams["num_experts"]
            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for w_name in ["down_proj", "gate_proj", "up_proj"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)

                    merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"

                    new_name = self.map_tensor_name(merged_name)

                    tensors.append((new_name, data_torch))
                return tensors
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]
            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")

@Model.register("GPT2LMHeadModel")
class GPT2Model(Model):
    model_arch = gguf.MODEL_ARCH.GPT2

    def set_gguf_parameters(self):
        self.gguf_writer.add_block_count(self.hparams["n_layer"])
        self.gguf_writer.add_context_length(self.hparams["n_ctx"])
        self.gguf_writer.add_embedding_length(self.hparams["n_embd"])
        self.gguf_writer.add_feed_forward_length(4 * self.hparams["n_embd"])
        self.gguf_writer.add_head_count(self.hparams["n_head"])
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        tensors: list[tuple[str, Tensor]] = []

        # we don't need these
        if name.endswith((".attn.bias", ".attn.masked_bias")):
            return tensors

        if name.endswith((".c_attn.weight", ".c_proj.weight", ".c_fc.weight")):
            data_torch = data_torch.transpose(1, 0)

        new_name = self.map_tensor_name(name)

        tensors.append((new_name, data_torch))

        # note: GPT2 output is tied to (same as) wte in original model
        if new_name == self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD):
            tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT), data_torch))

        return tensors
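
# Illustrative note: HF GPT-2 implements c_attn/c_proj/c_fc as Conv1D modules
# whose weight matrices are stored as (in_features, out_features), i.e.
# transposed relative to nn.Linear, hence the data_torch.transpose(1, 0) above
# before the tensor name is mapped to its GGUF equivalent.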

@Model.register("PhiForCausalLM")
class Phi2Model(Model):
    model_arch = gguf.MODEL_ARCH.PHI2

    def set_gguf_parameters(self):
        block_count = self.find_hparam(["num_hidden_layers", "n_layer"])

        rot_pct = self.find_hparam(["partial_rotary_factor"])
        n_embd = self.find_hparam(["hidden_size", "n_embd"])
        n_head = self.find_hparam(["num_attention_heads", "n_head"])

        self.gguf_writer.add_context_length(self.find_hparam(["n_positions", "max_position_embeddings"]))

        self.gguf_writer.add_embedding_length(n_embd)
        self.gguf_writer.add_feed_forward_length(4 * n_embd)
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_head_count(n_head)
        self.gguf_writer.add_head_count_kv(n_head)
        self.gguf_writer.add_layer_norm_eps(self.find_hparam(["layer_norm_epsilon", "layer_norm_eps"]))
        self.gguf_writer.add_rope_dimension_count(int(rot_pct * n_embd) // n_head)
        self.gguf_writer.add_file_type(self.ftype)
        self.gguf_writer.add_add_bos_token(False)

@Model.register("Phi3ForCausalLM")
class Phi3MiniModel(Model):
    model_arch = gguf.MODEL_ARCH.PHI3

    def set_vocab(self):
        from sentencepiece import SentencePieceProcessor

        tokenizer_path = self.dir_model / 'tokenizer.model'

        if not tokenizer_path.is_file():
            raise ValueError(f'Error: Missing {tokenizer_path}')

        tokenizer = SentencePieceProcessor()
        tokenizer.LoadFromFile(str(tokenizer_path))

        vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())

        tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
        scores: list[float] = [-10000.0] * vocab_size
        toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size

        for token_id in range(tokenizer.vocab_size()):
            piece = tokenizer.IdToPiece(token_id)
            text = piece.encode("utf-8")
            score = tokenizer.GetScore(token_id)

            toktype = SentencePieceTokenTypes.NORMAL
            if tokenizer.IsUnknown(token_id):
                toktype = SentencePieceTokenTypes.UNKNOWN
            elif tokenizer.IsControl(token_id):
                toktype = SentencePieceTokenTypes.CONTROL
            elif tokenizer.IsUnused(token_id):
                toktype = SentencePieceTokenTypes.UNUSED
            elif tokenizer.IsByte(token_id):
                toktype = SentencePieceTokenTypes.BYTE

            tokens[token_id] = text
            scores[token_id] = score
            toktypes[token_id] = toktype

        added_tokens_file = self.dir_model / 'added_tokens.json'
        if added_tokens_file.is_file():
            with open(added_tokens_file, "r", encoding="utf-8") as f:
                added_tokens_json = json.load(f)

                for key in added_tokens_json:
                    token_id = added_tokens_json[key]
                    if token_id >= vocab_size:
                        logger.debug(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
                        continue

                    tokens[token_id] = key.encode("utf-8")
                    scores[token_id] = -1000.0
                    toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED

        tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
        if tokenizer_config_file.is_file():
            with open(tokenizer_config_file, "r", encoding="utf-8") as f:
                tokenizer_config_json = json.load(f)
                added_tokens_decoder = tokenizer_config_json.get("added_tokens_decoder", {})
                for token_id, token_data in added_tokens_decoder.items():
                    token_id = int(token_id)
                    token = token_data["content"].encode("utf-8")
                    if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
                        if tokens[token_id] != token:
                            logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token.decode("utf-8")!r}')
                    tokens[token_id] = token
                    scores[token_id] = -1000.0
                    toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
                    if token_data.get("special"):
                        toktypes[token_id] = SentencePieceTokenTypes.CONTROL

        tokenizer_file = self.dir_model / 'tokenizer.json'
        if tokenizer_file.is_file():
            with open(tokenizer_file, "r", encoding="utf-8") as f:
                tokenizer_json = json.load(f)
                added_tokens = tokenizer_json.get("added_tokens", [])
                for token_data in added_tokens:
                    token_id = int(token_data["id"])
                    token = token_data["content"].encode("utf-8")
                    if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
                        if tokens[token_id] != token:
                            logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token.decode("utf-8")!r}')
                    tokens[token_id] = token
                    scores[token_id] = -1000.0
                    toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
                    if token_data.get("special"):
                        toktypes[token_id] = SentencePieceTokenTypes.CONTROL

        self.gguf_writer.add_tokenizer_model("llama")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        block_count = self.find_hparam(["num_hidden_layers", "n_layer"])

        n_embd = self.find_hparam(["hidden_size", "n_embd"])
        n_head = self.find_hparam(["num_attention_heads", "n_head"])
        n_head_kv = self.find_hparam(["num_key_value_heads", "n_head_kv"])
        rms_eps = self.find_hparam(["rms_norm_eps"])
        max_pos_embds = self.find_hparam(["n_positions", "max_position_embeddings"])
        orig_max_pos_embds = self.find_hparam(["original_max_position_embeddings"])
        rope_dims = n_embd // n_head

        self.gguf_writer.add_context_length(max_pos_embds)
        self.gguf_writer.add_rope_scaling_orig_ctx_len(orig_max_pos_embds)
        self.gguf_writer.add_embedding_length(n_embd)
        self.gguf_writer.add_feed_forward_length(self.find_hparam(["intermediate_size"]))
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_head_count(n_head)
        self.gguf_writer.add_head_count_kv(n_head_kv)
        self.gguf_writer.add_layer_norm_rms_eps(rms_eps)
        self.gguf_writer.add_rope_dimension_count(rope_dims)
        self.gguf_writer.add_rope_freq_base(self.find_hparam(["rope_theta"]))
        self.gguf_writer.add_file_type(self.ftype)

        # write rope scaling for long context (128k) model
        rope_scaling = self.find_hparam(['rope_scaling'], True)
        if rope_scaling is None:
            return

        scale = max_pos_embds / orig_max_pos_embds

        rope_scaling_type = rope_scaling.get('type', '').lower()
        if len(rope_scaling_type) == 0:
            raise KeyError('Missing the required key rope_scaling.type')

        if rope_scaling_type == 'su' or rope_scaling_type == 'longrope':
            attn_factor = math.sqrt(1 + math.log(scale) / math.log(orig_max_pos_embds)) if scale > 1.0 else 1.0
        elif rope_scaling_type == 'yarn':
            attn_factor = 0.1 * math.log(scale) + 1.0 if scale > 1.0 else 1.0
        else:
            raise NotImplementedError(f'The rope scaling type {rope_scaling_type} is not supported yet')

        self.gguf_writer.add_rope_scaling_attn_factors(attn_factor)

        long_factors = rope_scaling.get('long_factor', None)
        short_factors = rope_scaling.get('short_factor', None)

        if long_factors is None or short_factors is None:
            raise KeyError('Missing the required key rope_scaling.long_factor or rope_scaling.short_factor')

        if len(long_factors) != len(short_factors) or len(long_factors) != rope_dims / 2:
            raise ValueError(f'The length of rope long and short factors must be {rope_dims / 2}')

        self.gguf_writer.add_tensor(gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.ROPE_FACTORS_LONG] + ".weight", np.array(long_factors, dtype=np.float32))
        self.gguf_writer.add_tensor(gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.ROPE_FACTORS_SHORT] + ".weight", np.array(short_factors, dtype=np.float32))
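
# Worked example (illustrative, using the published 128k Phi-3 configs): with
# max_position_embeddings = 131072 and original_max_position_embeddings = 4096,
# scale = 32, so the "su"/"longrope" branch stores
# attn_factor = sqrt(1 + ln(32) / ln(4096)) ~ 1.19, and the long/short factor
# lists must each contain rope_dims / 2 entries, one per rotary frequency.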

@Model.register("PlamoForCausalLM")
class PlamoModel(Model):
    model_arch = gguf.MODEL_ARCH.PLAMO

    def set_vocab(self):
        self._set_vocab_sentencepiece()

    def set_gguf_parameters(self):
        hparams = self.hparams
        block_count = hparams["num_hidden_layers"]

        self.gguf_writer.add_context_length(4096)  # not in config.json
        self.gguf_writer.add_embedding_length(hparams["hidden_size"])
        self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_head_count(hparams["num_attention_heads"])
        self.gguf_writer.add_head_count_kv(5)  # hparams["num_key_value_heads"] is wrong
        self.gguf_writer.add_layer_norm_rms_eps(hparams["rms_norm_eps"])
        self.gguf_writer.add_file_type(self.ftype)

    def shuffle_attn_q_weight(self, data_torch):
        assert data_torch.size() == (5120, 5120)
        data_torch = data_torch.reshape(8, 5, 128, 5120)
        data_torch = torch.permute(data_torch, (1, 0, 2, 3))
        data_torch = torch.reshape(data_torch, (5120, 5120))
        return data_torch

    def shuffle_attn_output_weight(self, data_torch):
        assert data_torch.size() == (5120, 5120)
        data_torch = data_torch.reshape(5120, 8, 5, 128)
        data_torch = torch.permute(data_torch, (0, 2, 1, 3))
        data_torch = torch.reshape(data_torch, (5120, 5120))
        return data_torch

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        new_name = self.map_tensor_name(name)

        # shuffle for broadcasting of gqa in ggml_mul_mat
        if new_name.endswith("attn_q.weight"):
            data_torch = self.shuffle_attn_q_weight(data_torch)
        elif new_name.endswith("attn_output.weight"):
            data_torch = self.shuffle_attn_output_weight(data_torch)

        return [(new_name, data_torch)]
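
# Illustrative note (reading of the hard-coded shapes above, assuming 40 query
# heads of dim 128 arranged as 8 groups x 5): the query weight's rows are
# regrouped from an 8x5x128 layout to 5x8x128 so that grouped-query attention
# can broadcast the 5 K/V heads across their query heads in ggml_mul_mat, and
# the output projection's columns receive the matching inverse shuffle.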

@Model.register("CodeShellForCausalLM")
class CodeShellModel(Model):
    model_arch = gguf.MODEL_ARCH.CODESHELL

    def set_gguf_parameters(self):
        block_count = self.hparams["n_layer"]

        self.gguf_writer.add_context_length(self.hparams["n_positions"])
        self.gguf_writer.add_embedding_length(self.hparams["n_embd"])
        self.gguf_writer.add_feed_forward_length(4 * self.hparams["n_embd"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_head_count(self.hparams["n_head"])
        self.gguf_writer.add_head_count_kv(self.hparams["num_query_groups"])
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)
        self.gguf_writer.add_rope_freq_base(10000.0)
        self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
        self.gguf_writer.add_rope_scaling_factor(1.0)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        new_name = self.map_tensor_name(name)

        tensors: list[tuple[str, Tensor]] = [(new_name, data_torch)]

        if new_name == self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD):
            assert self.tensor_names is not None

            if all(s not in self.tensor_names for s in ("lm_head.weight", "output.weight")):
                # copy tok_embd.weight to output.weight
                tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT), data_torch))

        return tensors
  1748. @Model.register("InternLM2ForCausalLM")
  1749. class InternLM2Model(Model):
  1750. model_arch = gguf.MODEL_ARCH.INTERNLM2
  1751. def set_vocab(self):
  1752. # (TODO): Is there a better way?
  1753. # Copy from _set_vocab_sentencepiece, The only difference is that we will treat the character
  1754. # \x00 specially and convert it into an emoji character to prevent it from being mistakenly
  1755. # recognized as an empty string in C++.
  1756. from sentencepiece import SentencePieceProcessor
  1757. from sentencepiece import sentencepiece_model_pb2 as model
  1758. tokenizer_path = self.dir_model / 'tokenizer.model'
  1759. tokens: list[bytes] = []
  1760. scores: list[float] = []
  1761. toktypes: list[int] = []
  1762. if not tokenizer_path.is_file():
  1763. logger.error(f'Error: Missing {tokenizer_path}')
  1764. sys.exit(1)
  1765. sentencepiece_model = model.ModelProto() # pyright: ignore[reportAttributeAccessIssue]
  1766. sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read())
  1767. add_prefix = sentencepiece_model.normalizer_spec.add_dummy_prefix
  1768. tokenizer = SentencePieceProcessor()
  1769. tokenizer.LoadFromFile(str(tokenizer_path))
  1770. vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())
  1771. for token_id in range(vocab_size):
  1772. piece = tokenizer.IdToPiece(token_id)
  1773. text = piece.encode("utf-8")
  1774. score = tokenizer.GetScore(token_id)
  1775. if text == b"\x00":
  1776. # (TODO): fixme
  1777. # Hack here and replace the \x00 characters.
  1778. logger.warning(f"InternLM2 convert token '{text}' to '🐉'!")
  1779. text = "🐉".encode("utf-8")
  1780. toktype = SentencePieceTokenTypes.NORMAL
  1781. if tokenizer.IsUnknown(token_id):
  1782. toktype = SentencePieceTokenTypes.UNKNOWN
  1783. elif tokenizer.IsControl(token_id):
  1784. toktype = SentencePieceTokenTypes.CONTROL
  1785. elif tokenizer.IsUnused(token_id):
  1786. toktype = SentencePieceTokenTypes.UNUSED
  1787. elif tokenizer.IsByte(token_id):
  1788. toktype = SentencePieceTokenTypes.BYTE
  1789. # take care of ununsed raw token
  1790. if piece.startswith('[UNUSED'):
  1791. toktype = SentencePieceTokenTypes.UNUSED
  1792. tokens.append(text)
  1793. scores.append(score)
  1794. toktypes.append(toktype)
  1795. added_tokens_file = self.dir_model / 'added_tokens.json'
  1796. if added_tokens_file.is_file():
  1797. with open(added_tokens_file, "r", encoding="utf-8") as f:
  1798. added_tokens_json = json.load(f)
  1799. for key in added_tokens_json:
  1800. tokens.append(key.encode("utf-8"))
  1801. scores.append(-1000.0)
  1802. toktypes.append(SentencePieceTokenTypes.USER_DEFINED)
  1803. chat_eos_token = '<|im_end|>'
  1804. chat_eos_token_id = None
  1805. tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
  1806. if tokenizer_config_file.is_file():
  1807. with open(tokenizer_config_file, "r", encoding="utf-8") as f:
  1808. tokenizer_config_json = json.load(f)
  1809. added_tokens_decoder = tokenizer_config_json.get("added_tokens_decoder", {})
  1810. for token_id, foken_data in added_tokens_decoder.items():
  1811. token_id = int(token_id)
  1812. token = foken_data["content"]
  1813. if token == chat_eos_token:
  1814. chat_eos_token_id = token_id
  1815. token = token.encode("utf-8")
  1816. if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
  1817. if tokens[token_id] != token:
  1818. logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token.decode("utf-8")!r}')
  1819. tokens[token_id] = token
  1820. scores[token_id] = -1000.0
  1821. toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
  1822. if foken_data.get("special"):
  1823. toktypes[token_id] = SentencePieceTokenTypes.CONTROL
  1824. tokenizer_file = self.dir_model / 'tokenizer.json'
  1825. if tokenizer_file.is_file():
  1826. with open(tokenizer_file, "r", encoding="utf-8") as f:
  1827. tokenizer_json = json.load(f)
  1828. added_tokens = tokenizer_json.get("added_tokens", [])
  1829. for foken_data in added_tokens:
  1830. token_id = int(foken_data["id"])
  1831. token = foken_data["content"]
  1832. if token == chat_eos_token:
  1833. chat_eos_token_id = token_id
  1834. token = token.encode("utf-8")
  1835. if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
  1836. if tokens[token_id] != token:
  1837. logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token.decode("utf-8")!r}')
  1838. tokens[token_id] = token
  1839. scores[token_id] = -1000.0
  1840. toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
  1841. if foken_data.get("special"):
  1842. toktypes[token_id] = SentencePieceTokenTypes.CONTROL
        self.gguf_writer.add_tokenizer_model("llama")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)
        self.gguf_writer.add_add_space_prefix(add_prefix)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        old_eos = special_vocab.special_token_ids["eos"]
        if chat_eos_token_id is not None:
            # For the chat model, we replace the eos with '<|im_end|>'.
            # TODO: this is a hack, should be fixed
            #       https://github.com/ggerganov/llama.cpp/pull/6745#issuecomment-2067687048
            special_vocab.special_token_ids["eos"] = chat_eos_token_id
            logger.warning(f"Replacing eos token {old_eos} with special token {chat_eos_token_id}"
                           " in chat mode so that the conversation can end normally.")

        special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"])
        self.gguf_writer.add_block_count(self.hparams["num_hidden_layers"])
        self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
        self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
        self.gguf_writer.add_rope_freq_base(self.hparams["rope_theta"])
        self.gguf_writer.add_head_count(self.hparams["num_attention_heads"])
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
        self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"])
        self.gguf_writer.add_file_type(self.ftype)
        if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]:
            if self.hparams["rope_scaling"].get("type") == "linear":
                self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
                self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        num_heads = self.hparams["num_attention_heads"]
        num_kv_heads = self.hparams["num_key_value_heads"]
        n_embd = self.hparams["hidden_size"]
        q_per_kv = num_heads // num_kv_heads
        head_dim = n_embd // num_heads
        num_groups = num_heads // q_per_kv
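
        # note (editor): a sketch of the packed wqkv layout this unpacking assumes.
        # Each of the num_groups KV groups stores its q_per_kv query heads followed
        # by one key head and one value head, so after reshaping to
        #   (num_groups, q_per_kv + 2, head_dim, n_embd)
        # slicing [:, :q_per_kv], [:, -2] and [:, -1] recovers Q, K and V.
        # E.g. with num_heads=32, num_kv_heads=8: q_per_kv=4, and each group is
        # [q0 q1 q2 q3 k v] along the second axis.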
        if bid is not None and f"model.layers.{bid}.attention.wqkv" in name:
            qkv = data_torch

            qkv = qkv.reshape((num_groups, q_per_kv + 2, head_dim, n_embd))
            q, k, v = qkv[:, : q_per_kv], qkv[:, -2], qkv[:, -1]

            # The model weights of q and k require an additional reshape.
            q = LlamaModel.permute(q.reshape((-1, q.shape[-1])), num_heads, num_heads)
            k = LlamaModel.permute(k.reshape((-1, k.shape[-1])), num_heads, num_kv_heads)
            v = v.reshape((-1, v.shape[-1]))

            return [
                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid), q),
                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid), k),
                (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid), v),
            ]
        else:
            return [(self.map_tensor_name(name), data_torch)]


@Model.register("BertModel", "CamembertModel")
class BertModel(Model):
    model_arch = gguf.MODEL_ARCH.BERT

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.vocab_size = None

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_causal_attention(False)

        # get pooling path
        pooling_path = None
        module_path = self.dir_model / "modules.json"
        if module_path.is_file():
            with open(module_path, encoding="utf-8") as f:
                modules = json.load(f)
            for mod in modules:
                if mod["type"] == "sentence_transformers.models.Pooling":
                    pooling_path = mod["path"]
                    break

        # get pooling type
        if pooling_path is not None:
            with open(self.dir_model / pooling_path / "config.json", encoding="utf-8") as f:
                pooling = json.load(f)
            if pooling["pooling_mode_mean_tokens"]:
                pooling_type = gguf.PoolingType.MEAN
            elif pooling["pooling_mode_cls_token"]:
                pooling_type = gguf.PoolingType.CLS
            else:
                raise NotImplementedError("Only MEAN and CLS pooling types supported")
            self.gguf_writer.add_pooling_type(pooling_type)

    def set_vocab(self):
        tokens, toktypes, tokpre = self.get_vocab_base()
        self.vocab_size = len(tokens)

        # we need this to validate the size of the token_type embeddings
        # though currently we are passing all zeros to the token_type embeddings
        self.gguf_writer.add_token_type_count(2)  # "Sequence A" or "Sequence B"

        # convert to phantom space vocab
        def phantom(tok):
            if tok.startswith("[") and tok.endswith("]"):
                return tok
            if tok.startswith("##"):
                return tok[2:]
            return "\u2581" + tok
        tokens = list(map(phantom, tokens))
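
        # note (editor): an illustration of the WordPiece -> phantom-space mapping above:
        #   "[CLS]" -> "[CLS]"        (bracketed special tokens pass through)
        #   "##ing" -> "ing"          (continuation pieces drop the "##" marker)
        #   "hello" -> "\u2581hello"  (word-initial pieces gain the phantom-space marker)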

        # add vocab to gguf
        self.gguf_writer.add_tokenizer_model("bert")
        self.gguf_writer.add_tokenizer_pre(tokpre)
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)

        # handle special tokens
        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        # we are only using BERT for embeddings so we don't need the pooling layer
        if name in ("embeddings.position_ids", "pooler.dense.weight", "pooler.dense.bias"):
            return []  # we don't need these

        return [(self.map_tensor_name(name), data_torch)]


@Model.register("NomicBertModel")
class NomicBertModel(BertModel):
    model_arch = gguf.MODEL_ARCH.NOMIC_BERT

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # the HF config claims n_ctx=8192, but it uses RoPE scaling
        self.hparams["n_ctx"] = 2048

        # SwiGLU activation
        assert self.hparams["activation_function"] == "swiglu"
        # this doesn't do anything in the HF version
        assert self.hparams["causal"] is False
        # no bias tensors
        assert self.hparams["qkv_proj_bias"] is False
        assert self.hparams["mlp_fc1_bias"] is False
        assert self.hparams["mlp_fc2_bias"] is False
        # norm at end of layer
        assert self.hparams["prenorm"] is False
        # standard RoPE
        assert self.hparams["rotary_emb_fraction"] == 1.0
        assert self.hparams["rotary_emb_interleaved"] is False
        assert self.hparams["rotary_emb_scale_base"] is None

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_rope_freq_base(self.hparams["rotary_emb_base"])


@Model.register("GemmaForCausalLM")
class GemmaModel(Model):
    model_arch = gguf.MODEL_ARCH.GEMMA

    def set_vocab(self):
        self._set_vocab_sentencepiece()

        # TODO: these special tokens should be exported only for the CodeGemma family
        special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=False,
                                          special_token_types=['prefix', 'suffix', 'middle', 'fsep', 'eot'])
        special_vocab._set_special_token("prefix", 67)
        special_vocab._set_special_token("suffix", 69)
        special_vocab._set_special_token("middle", 68)
        special_vocab._set_special_token("fsep", 70)
        special_vocab._set_special_token("eot", 107)
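        # note (editor): the hard-coded ids above are assumed to correspond to the
        # Gemma tokenizer's fill-in-the-middle tokens (<|fim_prefix|>, <|fim_suffix|>,
        # <|fim_middle|>, <|file_separator|>) and <end_of_turn>; verify against the
        # tokenizer of the specific checkpoint before relying on them.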
        special_vocab.chat_template = None  # do not add it twice
        special_vocab.add_to_gguf(self.gguf_writer)

        self.gguf_writer.add_add_space_prefix(False)

    def set_gguf_parameters(self):
        hparams = self.hparams
        block_count = hparams["num_hidden_layers"]

        self.gguf_writer.add_context_length(hparams["max_position_embeddings"])
        self.gguf_writer.add_embedding_length(hparams["hidden_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
        self.gguf_writer.add_head_count(hparams["num_attention_heads"])
        self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"] if "num_key_value_heads" in hparams else hparams["num_attention_heads"])
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
        self.gguf_writer.add_key_length(hparams["head_dim"])
        self.gguf_writer.add_value_length(hparams["head_dim"])
        self.gguf_writer.add_file_type(self.ftype)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        # lm_head is not used in llama.cpp, while autoawq will include this tensor in the model.
        # To prevent errors, skip loading lm_head.weight.
        if name == "lm_head.weight":
            logger.debug(f"Skipping tensor {name!r} in safetensors so that conversion can end normally.")
            return []

        # ref: https://github.com/huggingface/transformers/blob/fc37f38915372c15992b540dfcbbe00a916d4fc6/src/transformers/models/gemma/modeling_gemma.py#L89
        if name.endswith("norm.weight"):
            data_torch = data_torch + 1

        return [(self.map_tensor_name(name), data_torch)]


@Model.register("Gemma2ForCausalLM")
class Gemma2Model(Model):
    model_arch = gguf.MODEL_ARCH.GEMMA2

    def set_vocab(self):
        self._set_vocab_sentencepiece()

        self.gguf_writer.add_add_space_prefix(False)

    def set_gguf_parameters(self):
        hparams = self.hparams
        block_count = hparams["num_hidden_layers"]

        self.gguf_writer.add_context_length(hparams["max_position_embeddings"])
        self.gguf_writer.add_embedding_length(hparams["hidden_size"])
        self.gguf_writer.add_block_count(block_count)
        self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
        self.gguf_writer.add_head_count(hparams["num_attention_heads"])
        self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"] if "num_key_value_heads" in hparams else hparams["num_attention_heads"])
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
        self.gguf_writer.add_key_length(hparams["head_dim"])
        self.gguf_writer.add_value_length(hparams["head_dim"])
        self.gguf_writer.add_file_type(self.ftype)
        self.gguf_writer.add_attn_logit_softcapping(
            self.hparams["attn_logit_softcapping"]
        )
        self.gguf_writer.add_final_logit_softcapping(
            self.hparams["final_logit_softcapping"]
        )
        self.gguf_writer.add_sliding_window(self.hparams["sliding_window"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        # lm_head is not used in llama.cpp, while autoawq will include this tensor in the model.
        # To prevent errors, skip loading lm_head.weight.
        if name == "lm_head.weight":
            logger.debug(f"Skipping tensor {name!r} in safetensors so that conversion can end normally.")
            return []

        # ref: https://github.com/huggingface/transformers/blob/fc37f38915372c15992b540dfcbbe00a916d4fc6/src/transformers/models/gemma/modeling_gemma.py#L89
        if name.endswith("norm.weight"):
            data_torch = data_torch + 1

        return [(self.map_tensor_name(name), data_torch)]


@Model.register("Starcoder2ForCausalLM")
class StarCoder2Model(Model):
    model_arch = gguf.MODEL_ARCH.STARCODER2


@Model.register("MambaForCausalLM", "MambaLMHeadModel")
class MambaModel(Model):
    model_arch = gguf.MODEL_ARCH.MAMBA

    def set_vocab(self):
        vocab_size = self.hparams["vocab_size"]
        # Round vocab size to the next multiple of 8
        pad_vocab = self.hparams.get("pad_vocab_size_multiple", 8)
        # pad using ceiling division
        # ref: https://stackoverflow.com/a/17511341/22827863
        vocab_size = -(vocab_size // -pad_vocab) * pad_vocab
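        # note (editor): -(a // -b) is ceil(a / b) in integer arithmetic, e.g.
        # vocab_size=50277, pad_vocab=8 -> -(50277 // -8) * 8 = 6285 * 8 = 50280.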
        self.hparams["vocab_size"] = vocab_size

        if (self.dir_model / "tokenizer.json").is_file():
            self._set_vocab_gpt2()
        elif (self.dir_model / "tokenizer.model").is_file():
            self._set_vocab_sentencepiece()
        else:
            # Use the GPT-NeoX tokenizer when no tokenizer files are present
            self._set_vocab_builtin("gpt-neox", vocab_size)

    def set_gguf_parameters(self):
        d_model = self.find_hparam(["hidden_size", "d_model"])
        d_conv = self.find_hparam(["conv_kernel", "d_conv"], optional=True) or 4
        d_inner = self.find_hparam(["intermediate_size", "d_inner"], optional=True) or 2 * d_model
        d_state = self.find_hparam(["state_size", "d_state"], optional=True) or 16
        # ceiling division
        # ref: https://stackoverflow.com/a/17511341/22827863
        # ref: https://github.com/state-spaces/mamba/blob/ce59daea3a090d011d6476c6e5b97f6d58ddad8b/mamba_ssm/modules/mamba_simple.py#L58
        dt_rank = self.find_hparam(["time_step_rank", "dt_rank"], optional=True) or -(d_model // -16)
        rms_norm_eps = self.find_hparam(["layer_norm_epsilon", "rms_norm_eps"], optional=True) or 1e-5

        # Fail early for models which don't have a block expansion factor of 2
        assert d_inner == 2 * d_model

        self.gguf_writer.add_context_length(2**20)  # arbitrary value; for those who use the default
        self.gguf_writer.add_embedding_length(d_model)
        self.gguf_writer.add_feed_forward_length(0)  # unused, but seemingly required when loading
        self.gguf_writer.add_head_count(0)  # unused, but seemingly required when loading
        self.gguf_writer.add_block_count(self.hparams["n_layer"])
        self.gguf_writer.add_ssm_conv_kernel(d_conv)
        self.gguf_writer.add_ssm_inner_size(d_inner)
        self.gguf_writer.add_ssm_state_size(d_state)
        self.gguf_writer.add_ssm_time_step_rank(dt_rank)
        self.gguf_writer.add_layer_norm_rms_eps(rms_norm_eps)
        self.gguf_writer.add_file_type(self.ftype)

    _tok_embd = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        output_name = self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT)
        tok_embd_name = self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD)

        new_name = self.map_tensor_name(name)

        if name.endswith(".A_log"):
            logger.debug("A_log --> A ==> " + new_name)
            data_torch = -torch.exp(data_torch)

        # assuming token_embd.weight is seen before output.weight
        if self._tok_embd is not None and new_name == output_name:
            if torch.equal(self._tok_embd, data_torch):
                logger.debug(f"{output_name} is equivalent to {tok_embd_name}, omitting")
                return []
        elif new_name == tok_embd_name:
            self._tok_embd = data_torch

        return [(new_name, data_torch)]

    def extra_f32_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool:
        del n_dims  # unused

        return bid is not None and new_name in (
            self.format_tensor_name(n, bid, ".weight" if name.endswith(".weight") else "") for n in [
                gguf.MODEL_TENSOR.SSM_CONV1D,
                gguf.MODEL_TENSOR.SSM_X,
                gguf.MODEL_TENSOR.SSM_DT,
                gguf.MODEL_TENSOR.SSM_A,
                gguf.MODEL_TENSOR.SSM_D,
            ]
        )


@Model.register("CohereForCausalLM")
class CommandR2Model(Model):
    model_arch = gguf.MODEL_ARCH.COMMAND_R

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # max_position_embeddings = 8192 in config.json but the model was actually
        # trained on 128k context length
        # aya-23 models don't have model_max_length specified
        self.hparams["max_position_embeddings"] = self.find_hparam(["model_max_length", "max_position_embeddings"])

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_logit_scale(self.hparams["logit_scale"])
        self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)


@Model.register("OlmoForCausalLM")
@Model.register("OLMoForCausalLM")
class OlmoModel(Model):
    model_arch = gguf.MODEL_ARCH.OLMO

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        self.gguf_writer.add_layer_norm_eps(1e-5)
        clip_qkv = self.hparams.get("clip_qkv")
        if clip_qkv is not None:
            self.gguf_writer.add_clamp_kqv(clip_qkv)

    # Same as super class, but permuting q_proj, k_proj
    # Copied from: LlamaModel
    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        n_head = self.hparams["num_attention_heads"]
        n_kv_head = self.hparams.get("num_key_value_heads")

        if name.endswith("q_proj.weight"):
            data_torch = LlamaModel.permute(data_torch, n_head, n_head)
        if name.endswith("k_proj.weight"):
            data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)

        return [(self.map_tensor_name(name), data_torch)]


@Model.register("JinaBertModel", "JinaBertForMaskedLM")
class JinaBertV2Model(BertModel):
    model_arch = gguf.MODEL_ARCH.JINA_BERT_V2

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.intermediate_size = self.hparams["intermediate_size"]

    def get_tensors(self):
        for name, data in super().get_tensors():
            if 'gated_layer' in name:
                d1 = data[:self.intermediate_size, :]
                name1 = name.replace('gated_layers', 'gated_layers_w')
                name1 = name1.replace('up_gated_layer', 'gated_layers_v')
                d2 = data[self.intermediate_size:, :]
                name2 = name.replace('gated_layers', 'gated_layers_v')
                name2 = name2.replace('up_gated_layer', 'gated_layers_w')
                yield name1, d1
                yield name2, d2
                continue

            yield name, data

    def set_vocab(self, *args, **kwargs):
        tokenizer_class = 'BertTokenizer'
        with open(self.dir_model / "tokenizer_config.json", "r", encoding="utf-8") as f:
            tokenizer_class = json.load(f)['tokenizer_class']

        if tokenizer_class == 'BertTokenizer':
            super().set_vocab()
        elif tokenizer_class == 'RobertaTokenizer':
            self._set_vocab_gpt2()
            self.gguf_writer.add_token_type_count(2)
        else:
            raise NotImplementedError(f'Tokenizer {tokenizer_class} is not supported for JinaBertModel')
        self.gguf_writer.add_add_bos_token(True)
        self.gguf_writer.add_add_eos_token(True)


@Model.register("OpenELMForCausalLM")
class OpenELMModel(Model):
    model_arch = gguf.MODEL_ARCH.OPENELM

    @staticmethod
    def _make_divisible(v: float | int, divisor: int) -> int:
        # ref: https://huggingface.co/apple/OpenELM-270M-Instruct/blob/eb111ff2e6724348e5b905984063d4064d4bc579/configuration_openelm.py#L34-L38
        new_v = max(divisor, int(v + divisor / 2) // divisor * divisor)
        # Make sure that round down does not go down by more than 10%.
        if new_v < 0.9 * v:
            new_v += divisor
        return new_v
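
    # note (editor): a worked example of _make_divisible, assuming divisor=256:
    #   _make_divisible(612.0, 256) -> int(612 + 128) // 256 * 256 = 512,
    #   but 512 < 0.9 * 612 = 550.8, so the result is bumped by one divisor to 768.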

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        ffn_multipliers: list[float] = self.hparams["ffn_multipliers"]
        ffn_dim_divisor: int = self.hparams["ffn_dim_divisor"]
        self._n_embd: int = self.hparams["model_dim"]
        self._num_kv_heads: list[int] = self.hparams["num_kv_heads"]
        self._num_query_heads: list[int] = self.hparams["num_query_heads"]
        self._ffn_dims: list[int] = [
            OpenELMModel._make_divisible(multiplier * self._n_embd, ffn_dim_divisor)
            for multiplier in ffn_multipliers
        ]
        assert isinstance(self._num_kv_heads, list) and isinstance(self._num_kv_heads[0], int)
        assert isinstance(self._num_query_heads, list) and isinstance(self._num_query_heads[0], int)

    # Uses the tokenizer from meta-llama/Llama-2-7b-hf
    def set_vocab(self):
        try:
            self._set_vocab_sentencepiece()
        except FileNotFoundError:
            self._set_vocab_builtin("llama-spm", self.hparams["vocab_size"])

    def set_gguf_parameters(self):
        n_embd = self._n_embd
        head_dim = self.hparams["head_dim"]
        rot_pct = 1.0
        assert self.block_count == len(self._num_kv_heads)
        assert self.block_count == len(self._num_query_heads)
        assert self.block_count == len(self._ffn_dims)

        self.gguf_writer.add_block_count(self.block_count)
        self.gguf_writer.add_context_length(self.hparams["max_context_length"])
        self.gguf_writer.add_embedding_length(n_embd)
        self.gguf_writer.add_feed_forward_length(self._ffn_dims)
        self.gguf_writer.add_head_count(self._num_query_heads)
        self.gguf_writer.add_head_count_kv(self._num_kv_heads)
        self.gguf_writer.add_rope_freq_base(self.hparams["rope_freq_constant"])
        # https://huggingface.co/apple/OpenELM-270M-Instruct/blob/c401df2/modeling_openelm.py#L30
        self.gguf_writer.add_layer_norm_rms_eps(1e-6)
        self.gguf_writer.add_rope_dimension_count(int(rot_pct * head_dim))
        self.gguf_writer.add_key_length(head_dim)
        self.gguf_writer.add_value_length(head_dim)
        self.gguf_writer.add_file_type(self.ftype)

    def find_hparam(self, keys: Iterable[str], optional: bool = False) -> Any:
        if "n_layers" in keys:
            return self.hparams["num_transformer_layers"]

        return super().find_hparam(keys, optional)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # split ff
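        # note (editor): OpenELM stores the gate and up projections fused in proj_1;
        # the first ff_dim rows are assumed to be the FFN gate and the remainder the
        # FFN up projection, so the single fused tensor is split into two below.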
        if bid is not None and name == f"transformer.layers.{bid}.ffn.proj_1.weight":
            ff_dim = self._ffn_dims[bid]
            yield (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE, bid), data_torch[:ff_dim])
            yield (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP, bid), data_torch[ff_dim:])
            return

        yield (self.map_tensor_name(name), data_torch)


@Model.register("ArcticForCausalLM")
class ArcticModel(Model):
    model_arch = gguf.MODEL_ARCH.ARCTIC

    def set_vocab(self):
        # The reason for using a custom implementation here is that the
        # snowflake-arctic-instruct model redefined tokens 31998 and 31999 from
        # tokenizer.model and used them as BOS and EOS instead of adding new tokens.
        from sentencepiece import SentencePieceProcessor

        tokenizer_path = self.dir_model / 'tokenizer.model'

        if not tokenizer_path.is_file():
            logger.error(f'Error: Missing {tokenizer_path}')
            sys.exit(1)

        # Read the whole vocabulary from the tokenizer.model file
        tokenizer = SentencePieceProcessor()
        tokenizer.LoadFromFile(str(tokenizer_path))

        vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())

        tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
        scores: list[float] = [-10000.0] * vocab_size
        toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size

        for token_id in range(tokenizer.vocab_size()):
            piece = tokenizer.IdToPiece(token_id)
            text = piece.encode("utf-8")
            score = tokenizer.GetScore(token_id)

            toktype = SentencePieceTokenTypes.NORMAL
            if tokenizer.IsUnknown(token_id):
                toktype = SentencePieceTokenTypes.UNKNOWN
            elif tokenizer.IsControl(token_id):
                toktype = SentencePieceTokenTypes.CONTROL
            elif tokenizer.IsUnused(token_id):
                toktype = SentencePieceTokenTypes.UNUSED
            elif tokenizer.IsByte(token_id):
                toktype = SentencePieceTokenTypes.BYTE

            tokens[token_id] = text
            scores[token_id] = score
            toktypes[token_id] = toktype
        # Use the added_tokens_decoder field from tokenizer_config.json as the source
        # of information about added/redefined tokens and modify them accordingly.
        tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
        if tokenizer_config_file.is_file():
            with open(tokenizer_config_file, "r", encoding="utf-8") as f:
                tokenizer_config_json = json.load(f)

            if "added_tokens_decoder" in tokenizer_config_json:
                added_tokens_decoder = tokenizer_config_json["added_tokens_decoder"]
                for token_id, token_json in added_tokens_decoder.items():
                    token_id = int(token_id)
                    if token_id >= vocab_size:
                        logger.debug(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
                        continue

                    token_content = token_json["content"]
                    token_type = SentencePieceTokenTypes.USER_DEFINED
                    token_score = -10000.0

                    # Map unk_token to UNKNOWN, other special tokens to CONTROL
                    # Set the score to 0.0 as in the original tokenizer.model
                    if ("special" in token_json) and token_json["special"]:
                        if token_content == tokenizer_config_json["unk_token"]:
                            token_type = SentencePieceTokenTypes.UNKNOWN
                        else:
                            token_type = SentencePieceTokenTypes.CONTROL
                        token_score = 0.0

                    logger.info(f"Setting added token {token_id} to '{token_content}' (type: {token_type}, score: {token_score:.2f})")
                    tokens[token_id] = token_content.encode("utf-8")
                    toktypes[token_id] = token_type
                    scores[token_id] = token_score

        self.gguf_writer.add_tokenizer_model("llama")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        hparams = self.hparams
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])
        self.gguf_writer.add_rope_dimension_count(hparams["hidden_size"] // hparams["num_attention_heads"])

    _experts: list[dict[str, Tensor]] | None = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        n_head = self.hparams["num_attention_heads"]
        n_kv_head = self.hparams.get("num_key_value_heads")

        if name.endswith("q_proj.weight"):
            data_torch = LlamaModel.permute(data_torch, n_head, n_head)
        if name.endswith("k_proj.weight"):
            data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)

        # process the experts separately
        if name.find("block_sparse_moe.experts") != -1:
            n_experts = self.hparams["num_local_experts"]

            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for wid in ["w1", "w2", "w3"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.{wid}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)

                    merged_name = f"layers.{bid}.feed_forward.experts.{wid}.weight"

                    new_name = self.map_tensor_name(merged_name)

                    tensors.append((new_name, data_torch))
                return tensors
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]

            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")


@Model.register("DeepseekV2ForCausalLM")
class DeepseekV2Model(Model):
    model_arch = gguf.MODEL_ARCH.DEEPSEEK2

    def set_vocab(self):
        self._set_vocab_gpt2()

    def set_gguf_parameters(self):
        super().set_gguf_parameters()

        hparams = self.hparams

        self.gguf_writer.add_leading_dense_block_count(hparams["first_k_dense_replace"])
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])
        if "q_lora_rank" in hparams and hparams["q_lora_rank"] is not None:
            self.gguf_writer.add_q_lora_rank(hparams["q_lora_rank"])
        self.gguf_writer.add_kv_lora_rank(hparams["kv_lora_rank"])
        self.gguf_writer.add_key_length(hparams["qk_nope_head_dim"] + hparams["qk_rope_head_dim"])
        self.gguf_writer.add_value_length(hparams["v_head_dim"])
        self.gguf_writer.add_expert_feed_forward_length(hparams["moe_intermediate_size"])
        self.gguf_writer.add_expert_count(hparams["n_routed_experts"])
        self.gguf_writer.add_expert_shared_count(hparams["n_shared_experts"])
        self.gguf_writer.add_expert_weights_scale(hparams["routed_scaling_factor"])
        self.gguf_writer.add_rope_dimension_count(hparams["qk_rope_head_dim"])

        if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]:
            if self.hparams["rope_scaling"].get("type") == "yarn":
                self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
                self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"])
                self.gguf_writer.add_rope_scaling_orig_ctx_len(self.hparams["rope_scaling"]["original_max_position_embeddings"])
                self.gguf_writer.add_rope_scaling_yarn_log_mul(0.1 * hparams["rope_scaling"]["mscale_all_dim"])
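                # note (editor): the 0.1 factor above pre-scales mscale_all_dim; at
                # load time the YaRN attention scale is then assumed to be recovered
                # roughly as 0.1 * mscale_all_dim * ln(scaling_factor) + 1.0.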

    _experts: list[dict[str, Tensor]] | None = None

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # process the experts separately
        if name.find("mlp.experts") != -1:
            n_experts = self.hparams["n_routed_experts"]
            assert bid is not None

            if self._experts is None:
                self._experts = [{} for _ in range(self.block_count)]

            self._experts[bid][name] = data_torch

            if len(self._experts[bid]) >= n_experts * 3:
                tensors: list[tuple[str, Tensor]] = []

                # merge the experts into a single 3d tensor
                for w_name in ["down_proj", "gate_proj", "up_proj"]:
                    datas: list[Tensor] = []

                    for xid in range(n_experts):
                        ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
                        datas.append(self._experts[bid][ename])
                        del self._experts[bid][ename]

                    data_torch = torch.stack(datas, dim=0)

                    merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"

                    new_name = self.map_tensor_name(merged_name)

                    tensors.append((new_name, data_torch))
                return tensors
            else:
                return []

        return [(self.map_tensor_name(name), data_torch)]

    def prepare_tensors(self):
        super().prepare_tensors()

        if self._experts is not None:
            # flatten `list[dict[str, Tensor]]` into `list[str]`
            experts = [k for d in self._experts for k in d.keys()]

            if len(experts) > 0:
                raise ValueError(f"Unprocessed experts: {experts}")


@Model.register("T5WithLMHeadModel")
@Model.register("T5ForConditionalGeneration")
@Model.register("MT5ForConditionalGeneration")
@Model.register("UMT5ForConditionalGeneration")
class T5Model(Model):
    model_arch = gguf.MODEL_ARCH.T5

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.shared_token_embeddings_found = False

    def set_vocab(self):
        # to avoid a "TypeError: Descriptors cannot be created directly"
        # exception when importing sentencepiece_model_pb2
        os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
        from sentencepiece import SentencePieceProcessor
        from sentencepiece import sentencepiece_model_pb2 as model

        tokenizer_path = self.dir_model / 'tokenizer.model'

        # many older models use the spiece.model tokenizer model filename
        if not tokenizer_path.is_file():
            tokenizer_path = self.dir_model / 'spiece.model'

        if not tokenizer_path.is_file():
            raise FileNotFoundError(f"File not found: {tokenizer_path}")

        sentencepiece_model = model.ModelProto()  # pyright: ignore[reportAttributeAccessIssue]
        sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read())

        # some models like the Pile-T5 family use a BPE tokenizer instead of Unigram
        if sentencepiece_model.trainer_spec.model_type == 2:  # BPE
            # ensure the tokenizer model file name is correct
            assert tokenizer_path.name == 'tokenizer.model'
            return self._set_vocab_sentencepiece()
        else:
            assert sentencepiece_model.trainer_spec.model_type == 1  # UNIGRAM
        add_prefix = sentencepiece_model.normalizer_spec.add_dummy_prefix
        remove_whitespaces = sentencepiece_model.normalizer_spec.remove_extra_whitespaces
        precompiled_charsmap = sentencepiece_model.normalizer_spec.precompiled_charsmap

        tokenizer = SentencePieceProcessor()
        tokenizer.LoadFromFile(str(tokenizer_path))

        vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())

        tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
        scores: list[float] = [-10000.0] * vocab_size
        toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size

        for token_id in range(tokenizer.vocab_size()):
            piece = tokenizer.IdToPiece(token_id)
            text = piece.encode("utf-8")
            score = tokenizer.GetScore(token_id)

            toktype = SentencePieceTokenTypes.NORMAL
            if tokenizer.IsUnknown(token_id):
                toktype = SentencePieceTokenTypes.UNKNOWN
            elif tokenizer.IsControl(token_id):
                toktype = SentencePieceTokenTypes.CONTROL
            elif tokenizer.IsUnused(token_id):
                toktype = SentencePieceTokenTypes.UNUSED
            elif tokenizer.IsByte(token_id):
                toktype = SentencePieceTokenTypes.BYTE

            tokens[token_id] = text
            scores[token_id] = score
            toktypes[token_id] = toktype

        added_tokens_file = self.dir_model / 'added_tokens.json'
        if added_tokens_file.is_file():
            with open(added_tokens_file, "r", encoding="utf-8") as f:
                added_tokens_json = json.load(f)

            for key in added_tokens_json:
                token_id = added_tokens_json[key]
                if token_id >= vocab_size:
                    logger.warning(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
                    continue

                tokens[token_id] = key.encode("utf-8")
                scores[token_id] = -1000.0
                toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED

        if vocab_size > len(tokens):
            pad_count = vocab_size - len(tokens)
            logger.debug(f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]")
            for i in range(1, pad_count + 1):
                tokens.append(bytes(f"[PAD{i}]", encoding="utf-8"))
                scores.append(-1000.0)
                toktypes.append(SentencePieceTokenTypes.UNUSED)

        self.gguf_writer.add_tokenizer_model("t5")
        self.gguf_writer.add_tokenizer_pre("default")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)
        self.gguf_writer.add_add_space_prefix(add_prefix)
        self.gguf_writer.add_remove_extra_whitespaces(remove_whitespaces)
        if precompiled_charsmap:
            self.gguf_writer.add_precompiled_charsmap(precompiled_charsmap)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

        self.gguf_writer.add_add_bos_token(False)
        self.gguf_writer.add_add_eos_token(True)

    def set_gguf_parameters(self):
        if (n_ctx := self.find_hparam(["n_positions"], optional=True)) is None:
            logger.warning("Couldn't find context length in config.json, assuming default value of 512")
            n_ctx = 512
        self.gguf_writer.add_context_length(n_ctx)
        self.gguf_writer.add_embedding_length(self.hparams["d_model"])
        self.gguf_writer.add_feed_forward_length(self.hparams["d_ff"])
        self.gguf_writer.add_block_count(self.hparams["num_layers"])
        self.gguf_writer.add_head_count(self.hparams["num_heads"])
        self.gguf_writer.add_key_length(self.hparams["d_kv"])
        self.gguf_writer.add_value_length(self.hparams["d_kv"])
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_relative_attn_buckets_count(self.hparams["relative_attention_num_buckets"])
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_decoder_start_token_id(self.hparams["decoder_start_token_id"])
        self.gguf_writer.add_file_type(self.ftype)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        # T5-based models contain shared token embeddings tensors saved randomly as either "encoder.embed_tokens.weight",
        # "decoder.embed_tokens.weight" or "shared.weight". In some models there are even multiple of them stored
        # in the safetensors files. We use the first tensor from these three as the token embeddings for both encoder
        # and decoder and ignore the remaining ones.
        if name in ["decoder.embed_tokens.weight", "encoder.embed_tokens.weight", "shared.weight"]:
            if not self.shared_token_embeddings_found:
                name = "shared.weight"
                self.shared_token_embeddings_found = True
            else:
                logger.debug(f"Skipping shared tensor {name!r} in safetensors so that conversion can end normally.")
                return []

        return [(self.map_tensor_name(name), data_torch)]


@Model.register("JAISLMHeadModel")
class JaisModel(Model):
    model_arch = gguf.MODEL_ARCH.JAIS

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # SwiGLU activation
        assert self.hparams["activation_function"] == "swiglu"
        # ALiBi position embedding
        assert self.hparams["position_embedding_type"] == "alibi"

        # Embeddings scale
        self.embeddings_scale = 1.0
        # note: for some JAIS flavors, output is tied to (same as) wte in the original model
        self.output_is_wte = False
        if 'mup_embeddings_scale' in self.hparams:
            self.output_is_wte = True  # Hack (?)
            self.embeddings_scale = self.hparams['mup_embeddings_scale']
        elif 'embeddings_scale' in self.hparams:
            self.embeddings_scale = self.hparams['embeddings_scale']
        else:
            assert False

        self.width_scale = 1.0
        if 'mup_output_alpha' in self.hparams:
            assert 'mup_width_scale' in self.hparams
            self.width_scale = self.hparams['mup_output_alpha'] * self.hparams['mup_width_scale']
        elif 'width_scale' in self.hparams:
            self.width_scale = self.hparams['width_scale']
        else:
            assert False

        self.max_alibi_bias = 8.0

    def set_vocab(self):
        self._set_vocab_gpt2()

    def set_gguf_parameters(self):
        self.gguf_writer.add_block_count(self.hparams["n_layer"])
        self.gguf_writer.add_context_length(self.hparams["n_positions"])
        self.gguf_writer.add_embedding_length(self.hparams["n_embd"])
        self.gguf_writer.add_feed_forward_length(self.hparams["n_inner"])
        self.gguf_writer.add_head_count(self.hparams["n_head"])
        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        tensors: list[tuple[str, Tensor]] = []

        # we don't need these
        if name.endswith(".attn.bias"):
            return tensors
        if name.endswith("relative_pe.slopes"):
            # Calculate the max ALiBi bias (this is the inverse of the ALiBi calculation).
            # Some other models have max_alibi_bias spelled out explicitly in the hyperparams,
            # but Jais's PyTorch model simply precalculates the slope values and places them
            # in relative_pe.slopes
            n_head_closest_log2 = 2 ** math.floor(math.log2(self.hparams["n_head"]))
            first_val = float(data_torch[0].item())
            self.max_alibi_bias = -round(math.log2(first_val) * n_head_closest_log2)
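            # note (editor): a worked example, assuming n_head=16 and a max bias of 8:
            # the first precalculated slope is 2**(-8/16) ~= 0.7071, and
            # -round(log2(0.7071) * 16) = -round(-0.5 * 16) = 8 recovers the bias.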

            return tensors

        if name.endswith((".c_attn.weight", ".c_proj.weight", ".c_fc.weight", ".c_fc2.weight")):
            data_torch = data_torch.transpose(1, 0)

        new_name = self.map_tensor_name(name)

        if new_name == self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD):
            tensors.append((new_name, data_torch * self.embeddings_scale))
            if self.output_is_wte:
                tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT), data_torch * self.width_scale))
        elif new_name == self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT):
            assert not self.output_is_wte
            tensors.append((new_name, data_torch * self.width_scale))
        else:
            tensors.append((new_name, data_torch))

        return tensors

    def prepare_tensors(self):
        super().prepare_tensors()
        self.gguf_writer.add_max_alibi_bias(self.max_alibi_bias)


@Model.register("ChatGLMModel", "ChatGLMForConditionalGeneration")
class ChatGLMModel(Model):
    model_arch = gguf.MODEL_ARCH.CHATGLM

    def set_vocab_chatglm3(self):
        dir_model = self.dir_model
        hparams = self.hparams
        tokens: list[bytes] = []
        toktypes: list[int] = []
        scores: list[float] = []

        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(dir_model, trust_remote_code=True)
        vocab_size = hparams.get("padded_vocab_size", len(tokenizer.get_vocab()))
        assert max(tokenizer.get_vocab().values()) < vocab_size
        role_special_tokens = ["<|system|>", "<|user|>", "<|assistant|>", "<|observation|>"]
        special_tokens = ["[MASK]", "[gMASK]", "[sMASK]", "sop", "eop"] + role_special_tokens
        for token_id in range(vocab_size):
            piece = tokenizer._convert_id_to_token(token_id)
            if token_id == 0:
                piece = "<unk>"
            elif token_id == 1:
                piece = "<bos>"
            elif token_id == 2:
                piece = "<eos>"

            text = piece.encode("utf-8")
            score = 0.0
            # Referencing the tokenizer's Python implementation (https://huggingface.co/THUDM/chatglm3-6b/blob/main/tokenization_chatglm.py),
            # a score is only valid if the token_id is less than tokenizer.tokenizer.sp_model.vocab_size()
            if len(piece) != 0 and token_id < tokenizer.tokenizer.sp_model.vocab_size():
                score = tokenizer.tokenizer.sp_model.get_score(token_id)

            if token_id >= tokenizer.tokenizer.sp_model.vocab_size():
                if piece in special_tokens:
                    toktype = SentencePieceTokenTypes.CONTROL
                elif len(piece) == 0:
                    text = f"[PAD{token_id}]".encode("utf-8")
                    toktype = SentencePieceTokenTypes.UNUSED
                else:
                    toktype = SentencePieceTokenTypes.USER_DEFINED
                tokens.append(text)
                scores.append(score)
                toktypes.append(toktype)
                continue
            toktype = SentencePieceTokenTypes.NORMAL
            if tokenizer.tokenizer.sp_model.is_unknown(token_id):
                toktype = SentencePieceTokenTypes.UNKNOWN
            elif tokenizer.tokenizer.sp_model.is_control(token_id):
                toktype = SentencePieceTokenTypes.CONTROL
            elif tokenizer.tokenizer.sp_model.is_unused(token_id):
                toktype = SentencePieceTokenTypes.UNUSED
            elif tokenizer.tokenizer.sp_model.is_byte(token_id):
                toktype = SentencePieceTokenTypes.BYTE

            tokens.append(text)
            scores.append(score)
            toktypes.append(toktype)

        self.gguf_writer.add_tokenizer_model("llama")
        # glm3 needs prefix and suffix formatted as:
        # prompt = "[gMASK]sop<|user|>\n" + prompt + "<|assistant|>"
        self.gguf_writer.add_tokenizer_pre("chatglm-spm")
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_scores(scores)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
        special_vocab.add_to_gguf(self.gguf_writer)

    @staticmethod
    def token_bytes_to_string(b):
        from transformers.models.gpt2.tokenization_gpt2 import bytes_to_unicode
        byte_encoder = bytes_to_unicode()
        return ''.join([byte_encoder[ord(char)] for char in b.decode('latin-1')])

    @staticmethod
    def bpe(mergeable_ranks: dict[bytes, int], token: bytes, max_rank: int | None = None) -> list[bytes]:
        parts = [bytes([b]) for b in token]
        while True:
            min_idx = None
            min_rank = None
            for i, pair in enumerate(zip(parts[:-1], parts[1:])):
                rank = mergeable_ranks.get(pair[0] + pair[1])
                if rank is not None and (min_rank is None or rank < min_rank):
                    min_idx = i
                    min_rank = rank
            if min_rank is None or (max_rank is not None and min_rank >= max_rank):
                break
            assert min_idx is not None
            parts = parts[:min_idx] + [parts[min_idx] + parts[min_idx + 1]] + parts[min_idx + 2:]
        return parts
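
    # note (editor): bpe() replays merges of rank < max_rank on a token's raw bytes.
    # Calling it with max_rank set to the token's own rank therefore stops one step
    # short, leaving exactly the two pieces whose merge produced the token; this is
    # how the "merges" list is reconstructed from mergeable_ranks in set_vocab below.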

    def set_vocab(self):
        if "THUDM/chatglm3-6b" in self.hparams.get("_name_or_path", ""):
            self.set_vocab_chatglm3()
            return

        dir_model = self.dir_model
        hparams = self.hparams
        tokens: list[str] = []
        toktypes: list[int] = []

        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(dir_model, trust_remote_code=True)
        vocab_size = hparams["padded_vocab_size"]
        assert max(tokenizer.get_vocab().values()) < vocab_size

        tokpre = self.get_vocab_base_pre(tokenizer)

        merges = []
        vocab = {}
        mergeable_ranks = tokenizer.mergeable_ranks
        for token, rank in mergeable_ranks.items():
            vocab[ChatGLMModel.token_bytes_to_string(token)] = rank
            if len(token) == 1:
                continue
            merged = ChatGLMModel.bpe(mergeable_ranks, token, max_rank=rank)
            assert len(merged) >= 2 and len(merged) <= 7
            merges.append(' '.join(map(ChatGLMModel.token_bytes_to_string, merged)))

        # for this kind of tokenizer, added_vocab is not a subset of vocab, so they need to be combined
        added_vocab = tokenizer.get_added_vocab()
        reverse_vocab = {id_: encoded_tok for encoded_tok, id_ in {**vocab, **added_vocab}.items()}

        for i in range(vocab_size):
            if i not in reverse_vocab:
                tokens.append(f"[PAD{i}]")
                toktypes.append(gguf.TokenType.UNUSED)
            elif reverse_vocab[i] in added_vocab:
                tokens.append(reverse_vocab[i])
                if tokenizer.added_tokens_decoder[i].special:
                    toktypes.append(gguf.TokenType.CONTROL)
                else:
                    toktypes.append(gguf.TokenType.USER_DEFINED)
            else:
                tokens.append(reverse_vocab[i])
                toktypes.append(gguf.TokenType.NORMAL)

        self.gguf_writer.add_tokenizer_model("gpt2")
        self.gguf_writer.add_tokenizer_pre(tokpre)
        self.gguf_writer.add_token_list(tokens)
        self.gguf_writer.add_token_types(toktypes)

        special_vocab = gguf.SpecialVocab(dir_model, load_merges=False)
        special_vocab.merges = merges
        # only add special tokens when they were not already loaded from config.json
        special_vocab._set_special_token("eos", tokenizer.get_added_vocab()["<|endoftext|>"])
        special_vocab._set_special_token("eot", tokenizer.get_added_vocab()["<|user|>"])
        # this one is usually not in config.json anyway
        special_vocab._set_special_token("unk", tokenizer.get_added_vocab()["<|endoftext|>"])
        special_vocab.add_to_gguf(self.gguf_writer)

    def set_gguf_parameters(self):
        n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed"))
        n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads"))
        n_head_kv = self.hparams.get("multi_query_group_num", n_head)
        self.gguf_writer.add_context_length(self.hparams.get("seq_length", n_embed))
        self.gguf_writer.add_embedding_length(n_embed)
        self.gguf_writer.add_feed_forward_length(self.hparams.get("ffn_hidden_size", 4 * n_embed))
        self.gguf_writer.add_block_count(self.hparams["num_layers"])
        self.gguf_writer.add_head_count(n_head)
        self.gguf_writer.add_head_count_kv(n_head_kv)
        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layernorm_epsilon"])
        self.gguf_writer.add_file_type(self.ftype)
        self.gguf_writer.add_rope_dimension_count(64)
        self.gguf_writer.add_add_bos_token(False)
        rope_freq = 10000
        if "rope_ratio" in self.hparams:
            rope_freq = rope_freq * self.hparams["rope_ratio"]
        self.gguf_writer.add_rope_freq_base(rope_freq)

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused

        if name.endswith(".rotary_pos_emb.inv_freq"):
            return []

        name = name.removeprefix("transformer.")
        return [(self.map_tensor_name(name), data_torch)]


###### CONVERSION LOGIC ######


# tree of lazy tensors
class LazyTorchTensor(gguf.LazyBase):
    _tensor_type = torch.Tensor
    # to keep the type-checker happy
    dtype: torch.dtype
    shape: torch.Size

    # only used when converting a torch.Tensor to a np.ndarray
    _dtype_map: dict[torch.dtype, type] = {
        torch.float16: np.float16,
        torch.float32: np.float32,
    }

    # used for safetensors slices
    # ref: https://github.com/huggingface/safetensors/blob/079781fd0dc455ba0fe851e2b4507c33d0c0d407/bindings/python/src/lib.rs#L1046
    # TODO: uncomment U64, U32, and U16, ref: https://github.com/pytorch/pytorch/issues/58734
    _dtype_str_map: dict[str, torch.dtype] = {
        "F64": torch.float64,
        "F32": torch.float32,
        "BF16": torch.bfloat16,
        "F16": torch.float16,
        # "U64": torch.uint64,
        "I64": torch.int64,
        # "U32": torch.uint32,
        "I32": torch.int32,
        # "U16": torch.uint16,
        "I16": torch.int16,
        "U8": torch.uint8,
        "I8": torch.int8,
        "BOOL": torch.bool,
        "F8_E4M3": torch.float8_e4m3fn,
        "F8_E5M2": torch.float8_e5m2,
    }

    def numpy(self) -> gguf.LazyNumpyTensor:
        dtype = self._dtype_map[self.dtype]
        return gguf.LazyNumpyTensor(
            meta=gguf.LazyNumpyTensor.meta_with_dtype_and_shape(dtype, self.shape),
            args=(self,),
            func=(lambda s: s.numpy())
        )

    @classmethod
    def meta_with_dtype_and_shape(cls, dtype: torch.dtype, shape: tuple[int, ...]) -> Tensor:
        return torch.empty(size=shape, dtype=dtype, device="meta")

    @classmethod
    def from_safetensors_slice(cls, st_slice: Any) -> Tensor:
        dtype = cls._dtype_str_map[st_slice.get_dtype()]
        shape: tuple[int, ...] = tuple(st_slice.get_shape())
        lazy = cls(meta=cls.meta_with_dtype_and_shape(dtype, shape), args=(st_slice,), func=lambda s: s[:])
        return cast(torch.Tensor, lazy)

    @classmethod
    def __torch_function__(cls, func, types, args=(), kwargs=None):
        del types  # unused

        if kwargs is None:
            kwargs = {}

        if func is torch.Tensor.numpy:
            return args[0].numpy()

        return cls._wrap_fn(func)(*args, **kwargs)
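

# note (editor): a rough sketch of how the lazy mechanism above is used — ops on a
# LazyTorchTensor are recorded against a zero-cost "meta" tensor and only evaluated
# when the data is finally written out, e.g. (hypothetical usage):
#   t = LazyTorchTensor.from_safetensors_slice(st_slice)  # no data read yet
#   t = t.transpose(1, 0)                                 # shape tracked on device="meta"
#   arr = t.numpy()                                       # slice is read and ops replayed here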


def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(
        description="Convert a huggingface model to a GGML compatible file")
    parser.add_argument(
        "--vocab-only", action="store_true",
        help="extract only the vocab",
    )
    parser.add_argument(
        "--outfile", type=Path,
        help="path to write to; default: based on input. {ftype} will be replaced by the outtype.",
    )
    parser.add_argument(
        "--outtype", type=str, choices=["f32", "f16", "bf16", "q8_0", "auto"], default="f16",
        help="output format - use f32 for float32, f16 for float16, bf16 for bfloat16, q8_0 for Q8_0, auto for the highest-fidelity 16-bit float type depending on the first loaded tensor type",
    )
    parser.add_argument(
        "--bigendian", action="store_true",
        help="model is executed on a big-endian machine",
    )
    parser.add_argument(
        "model", type=Path,
        help="directory containing the model files",
    )
    parser.add_argument(
        "--use-temp-file", action="store_true",
        help="use the tempfile library while processing (helpful when running out of memory, process killed)",
    )
    parser.add_argument(
        "--no-lazy", action="store_true",
        help="use more RAM by computing all outputs before writing (use in case lazy evaluation is broken)",
    )
    parser.add_argument(
        "--model-name", type=str, default=None,
        help="name of the model",
    )
    parser.add_argument(
        "--verbose", action="store_true",
        help="increase output verbosity",
    )
    parser.add_argument(
        "--split-max-tensors", type=int, default=0,
        help="max tensors in each split",
    )
    parser.add_argument(
        "--split-max-size", type=str, default="0",
        help="max size per split N(K|M|G)",
    )
    parser.add_argument(
        "--dry-run", action="store_true",
        help="only print out a split plan and exit, without writing any new files",
    )
    parser.add_argument(
        "--no-tensor-first-split", action="store_true",
        help="do not add tensors to the first split (disabled by default)"
    )
    parser.add_argument(
        "--metadata", type=Path,
        help="Specify the path for an authorship metadata override file"
    )

    return parser.parse_args()


def split_str_to_n_bytes(split_str: str) -> int:
    if split_str.endswith("K"):
        n = int(split_str[:-1]) * 1000
    elif split_str.endswith("M"):
        n = int(split_str[:-1]) * 1000 * 1000
    elif split_str.endswith("G"):
        n = int(split_str[:-1]) * 1000 * 1000 * 1000
    elif split_str.isnumeric():
        n = int(split_str)
    else:
        raise ValueError(f"Invalid split size: {split_str}, must be a number, optionally followed by K, M, or G")

    if n < 0:
        raise ValueError(f"Invalid split size: {split_str}, must be non-negative")

    return n
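

# note (editor): the suffixes above are decimal (SI) units, not binary, e.g.:
#   split_str_to_n_bytes("300") == 300
#   split_str_to_n_bytes("50M") == 50_000_000
#   split_str_to_n_bytes("2G")  == 2_000_000_000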


def main() -> None:
    args = parse_args()

    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)

    dir_model = args.model

    if not dir_model.is_dir():
        logger.error(f'Error: {args.model} is not a directory')
        sys.exit(1)

    ftype_map: dict[str, gguf.LlamaFileType] = {
        "f32": gguf.LlamaFileType.ALL_F32,
        "f16": gguf.LlamaFileType.MOSTLY_F16,
        "bf16": gguf.LlamaFileType.MOSTLY_BF16,
        "q8_0": gguf.LlamaFileType.MOSTLY_Q8_0,
        "auto": gguf.LlamaFileType.GUESSED,
    }

    is_split = args.split_max_tensors > 0 or args.split_max_size != "0"
    if args.use_temp_file and is_split:
        logger.error("Error: Cannot use temp file when splitting")
        sys.exit(1)

    fname_out = None

    if args.outfile is not None:
        fname_out = args.outfile

    logger.info(f"Loading model: {dir_model.name}")

    hparams = Model.load_hparams(dir_model)

    with torch.inference_mode():
        output_type = ftype_map[args.outtype]
        model_architecture = hparams["architectures"][0]

        try:
            model_class = Model.from_model_architecture(model_architecture)
        except NotImplementedError:
            logger.error(f"Model {model_architecture} is not supported")
            sys.exit(1)

        model_instance = model_class(dir_model=dir_model, ftype=output_type, fname_out=fname_out,
                                     is_big_endian=args.bigendian, use_temp_file=args.use_temp_file,
                                     eager=args.no_lazy,
                                     metadata_override=args.metadata, model_name=args.model_name,
                                     split_max_tensors=args.split_max_tensors,
                                     split_max_size=split_str_to_n_bytes(args.split_max_size), dry_run=args.dry_run,
                                     small_first_shard=args.no_tensor_first_split)

        if args.vocab_only:
            logger.info("Exporting model vocab...")
            model_instance.write_vocab()
            logger.info(f"Model vocab successfully exported to {model_instance.fname_out}")
        else:
            logger.info("Exporting model...")
            model_instance.write()
            assert model_instance.fname_out is not None
            out_path = f"{model_instance.fname_out.parent}{os.sep}" if is_split else model_instance.fname_out
            logger.info(f"Model successfully exported to {out_path}")


if __name__ == '__main__':
    main()