#!/usr/bin/env python
import argparse
import concurrent.futures
import copy
import enum
import faulthandler
import functools
import io
import itertools
import json
import math
import mmap
import pickle
import re
import signal
import struct
import sys
import zipfile
from abc import ABCMeta, abstractmethod
from dataclasses import dataclass
from pathlib import Path
from typing import (IO, TYPE_CHECKING, Any, Callable, Dict, Iterable, List,
                    Literal, Optional, Sequence, Tuple, TypeVar, Union)

import numpy as np
from sentencepiece import SentencePieceProcessor  # type: ignore

if TYPE_CHECKING:
    from typing_extensions import TypeAlias

if hasattr(faulthandler, 'register') and hasattr(signal, 'SIGUSR1'):
    faulthandler.register(signal.SIGUSR1)

NDArray: 'TypeAlias' = 'np.ndarray[Any, Any]'


@dataclass(frozen=True)
class UnquantizedDataType:
    name: str


DT_F16 = UnquantizedDataType('F16')
DT_F32 = UnquantizedDataType('F32')
DT_I32 = UnquantizedDataType('I32')
DT_BF16 = UnquantizedDataType('BF16')


@dataclass(frozen=True)
class QuantizedDataType:
    groupsize: int
    have_addends: bool
    have_g_idx: bool


DT_Q4_0 = QuantizedDataType(groupsize=32, have_addends=False, have_g_idx=False)
DT_Q4_1 = QuantizedDataType(groupsize=32, have_addends=True, have_g_idx=False)

DataType = Union[UnquantizedDataType, QuantizedDataType]

DATA_TYPE_TO_FTYPE: Dict[DataType, int] = {
    DT_F32: 0,
    DT_F16: 1,
    DT_Q4_0: 2,
    DT_Q4_1: 3,
}

FTYPE_TO_DATA_TYPE: Dict[int, DataType] = \
    {ftype: dtype for (dtype, ftype) in DATA_TYPE_TO_FTYPE.items()}

DATA_TYPE_TO_NUMPY: Dict[DataType, 'np.dtype[Any]'] = {
    DT_BF16: np.dtype(np.uint16),
    DT_F16: np.dtype(np.float16),
    DT_F32: np.dtype(np.float32),
    DT_I32: np.dtype(np.int32),
}

NUMPY_TYPE_TO_DATA_TYPE: Dict['np.dtype[Any]', DataType] = \
    {dtype: data_type for (data_type, dtype) in DATA_TYPE_TO_NUMPY.items()}


class GGMLFileType(enum.Enum):
    AllF32 = 0
    MostlyF16 = 1  # except 1d tensors
    MostlyQ4_0 = 2  # except 1d tensors
    MostlyQ4_1 = 3  # except 1d tensors
    PerLayerIsQ4_1 = 4  # but tok_embeddings.weight and output.weight are F16

    def type_for_tensor(self, name: str, tensor: 'LazyTensor') -> DataType:
        if len(tensor.shape) == 1:
            # 1D tensors are always F32.
            return DT_F32
        elif self == GGMLFileType.AllF32:
            return DT_F32
        elif self == GGMLFileType.MostlyF16:
            return DT_F16
        elif self == GGMLFileType.MostlyQ4_0:
            return DT_Q4_0
        elif self == GGMLFileType.MostlyQ4_1:
            return DT_Q4_1
        elif self == GGMLFileType.PerLayerIsQ4_1:
            if name in ('output.weight', 'tok_embeddings.weight'):
                return DT_F16
            else:
                return DT_Q4_1
        else:
            raise ValueError(self)


def make_tensors_list() -> List[str]:
    ret = [
        'tok_embeddings.weight',
        'norm.weight',
        'output.weight',
    ]
    for i in range(80):  # maximum number of layers
        ret += [
            f'layers.{i}.attention.wq.weight',
            f'layers.{i}.attention.wk.weight',
            f'layers.{i}.attention.wv.weight',
            f'layers.{i}.attention.wo.weight',
            f'layers.{i}.attention_norm.weight',
            f'layers.{i}.feed_forward.w1.weight',
            f'layers.{i}.feed_forward.w2.weight',
            f'layers.{i}.feed_forward.w3.weight',
            f'layers.{i}.ffn_norm.weight',
        ]
    return ret


TENSORS_LIST = make_tensors_list()
TENSORS_SET = set(TENSORS_LIST)
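

# Recover the `n_mult` value by inverting the feed-forward size computation used
# by the original LLaMA code: n_ff = ceil((8 * n_embd / 3) / n_mult) * n_mult.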
def find_n_mult(n_ff: int, n_embd: int) -> int:
    # hardcoded magic range
    for n_mult in range(256, 1, -1):
        calc_ff = (((8*n_embd) // 3 + n_mult - 1) // n_mult)*n_mult
        if calc_ff == n_ff:
            return n_mult
    raise Exception(f"failed to find n_mult for (n_ff={n_ff}, n_embd={n_embd}).")


@dataclass
class Params:
    n_vocab: int
    n_embd: int
    n_mult: int
    n_head: int
    n_layer: int

    @staticmethod
    def guessed(model: 'LazyModel') -> 'Params':
        # try transformer naming first
        n_vocab, n_embd = model["model.embed_tokens.weight"].shape if "model.embed_tokens.weight" in model else model["tok_embeddings.weight"].shape

        # try transformer naming first
        if "model.layers.0.self_attn.q_proj.weight" in model:
            n_layer = next(i for i in itertools.count() if f"model.layers.{i}.self_attn.q_proj.weight" not in model)
        elif "model.layers.0.self_attn.W_pack.weight" in model:  # next: try baichuan naming
            n_layer = next(i for i in itertools.count() if f"model.layers.{i}.self_attn.W_pack.weight" not in model)
        else:
            n_layer = next(i for i in itertools.count() if f"layers.{i}.attention.wq.weight" not in model)

        if n_layer < 1:
            raise Exception("failed to guess 'n_layer'. This model is unknown or unsupported.\n"
                            "Suggestion: provide 'config.json' of the model in the same directory containing model files.")

        n_head = n_embd // 128  # guessed

        return Params(
            n_vocab = n_vocab,
            n_embd = n_embd,
            n_mult = 256,
            n_head = n_head,
            n_layer = n_layer,
        )

    @staticmethod
    def loadHFTransformerJson(model: 'LazyModel', config_path: 'Path') -> 'Params':
        config = json.load(open(config_path))

        n_vocab = config["vocab_size"]
        n_embd = config["hidden_size"]
        n_head = config["num_attention_heads"]
        n_layer = config["num_hidden_layers"]
        n_ff = config["intermediate_size"]

        n_mult = find_n_mult(n_ff, n_embd)

        return Params(
            n_vocab = n_vocab,
            n_embd = n_embd,
            n_mult = n_mult,
            n_head = n_head,
            n_layer = n_layer,
        )

    # LLaMA v2 70B params.json
    # {"dim": 8192, "multiple_of": 4096, "ffn_dim_multiplier": 1.3, "n_heads": 64, "n_kv_heads": 8, "n_layers": 80, "norm_eps": 1e-05, "vocab_size": -1}
    @staticmethod
    def loadOriginalParamsJson(model: 'LazyModel', config_path: 'Path') -> 'Params':
        config = json.load(open(config_path))

        n_vocab = config["vocab_size"]
        n_embd = config["dim"]
        n_head = config["n_heads"]
        n_layer = config["n_layers"]
        n_mult = config["multiple_of"]

        if n_vocab == -1:
            n_vocab = model["tok_embeddings.weight"].shape[0]

        return Params(
            n_vocab = n_vocab,
            n_embd = n_embd,
            n_mult = n_mult,
            n_head = n_head,
            n_layer = n_layer,
        )

    @staticmethod
    def load(model_plus: 'ModelPlus') -> 'Params':
        hf_config_path = model_plus.paths[0].parent / "config.json"
        orig_config_path = model_plus.paths[0].parent / "params.json"

        if hf_config_path.exists():
            params = Params.loadHFTransformerJson(model_plus.model, hf_config_path)
        elif orig_config_path.exists():
            params = Params.loadOriginalParamsJson(model_plus.model, orig_config_path)
        else:
            params = Params.guessed(model_plus.model)

        print(f'params: n_vocab:{params.n_vocab} n_embd:{params.n_embd} n_mult:{params.n_mult} n_head:{params.n_head} n_layer:{params.n_layer}')
        return params


class SentencePieceVocab:
    def __init__(self, fname_tokenizer: Path, fname_added_tokens: Optional[Path], vocabtype: Optional[str]) -> None:
        self.vocabtype = vocabtype
        if self.vocabtype == "bpe":
            self.sentencepiece_tokenizer = json.loads(open(str(fname_tokenizer)).read())
        else:
            self.sentencepiece_tokenizer = SentencePieceProcessor(str(fname_tokenizer))
        added_tokens: Dict[str, int]
        if fname_added_tokens is not None:
            added_tokens = json.load(open(fname_added_tokens))
        else:
            added_tokens = {}
        if self.vocabtype == "bpe":
            vocab_size: int = len(self.sentencepiece_tokenizer)
        else:
            vocab_size: int = self.sentencepiece_tokenizer.vocab_size()
        expected_ids = list(range(vocab_size, vocab_size + len(added_tokens)))
        actual_ids = sorted(added_tokens.values())
        if expected_ids != actual_ids:
            raise Exception(f"Expected added token IDs to be sequential and start at {vocab_size}; got {actual_ids}")
        items = sorted(added_tokens.items(), key=lambda text_idx: text_idx[1])
        self.added_tokens_list = [text for (text, idx) in items]
        self.vocab_size_base: int = vocab_size
        self.vocab_size: int = self.vocab_size_base + len(self.added_tokens_list)
        self.fname_tokenizer = fname_tokenizer
        self.fname_added_tokens = fname_added_tokens

    def sentencepiece_tokens(self) -> Iterable[Tuple[bytes, float]]:
        tokenizer = self.sentencepiece_tokenizer
        if self.vocabtype == "bpe":
            from transformers.models.gpt2 import tokenization_gpt2
            byte_encoder = tokenization_gpt2.bytes_to_unicode()
            byte_decoder = {v: k for k, v in byte_encoder.items()}
            for i, item in enumerate(tokenizer):
                text: bytes
                text = b''.join([x.to_bytes(1, byteorder='big') for x in [byte_decoder[y] for y in item]])
                score: float = -i
                yield text, score
        else:
            for i in range(tokenizer.vocab_size()):
                text: bytes
                if tokenizer.is_unknown(i):
                    text = " \u2047 ".encode("utf-8")
                elif tokenizer.is_control(i):
                    text = b""
                elif tokenizer.is_byte(i):
                    piece = tokenizer.id_to_piece(i)
                    if len(piece) != 6:
                        raise Exception(f"Invalid token: {piece}")
                    byte_value = int(piece[3:-1], 16)
                    text = struct.pack("B", byte_value)
                else:
                    text = tokenizer.id_to_piece(i).replace("\u2581", " ").encode("utf-8")
                score: float = tokenizer.get_score(i)
                yield text, score

    def added_tokens(self) -> Iterable[Tuple[bytes, float]]:
        for text in self.added_tokens_list:
            score = -1000.0
            yield text.encode("utf-8"), score

    def all_tokens(self) -> Iterable[Tuple[bytes, float]]:
        yield from self.sentencepiece_tokens()
        yield from self.added_tokens()

    def __repr__(self) -> str:
        return f"<SentencePieceVocab with {self.vocab_size_base} base tokens and {len(self.added_tokens_list)} added tokens>"


class GGMLVocab:
    def __init__(self, tokens: List[Tuple[bytes, float]]):
        self.tokens = tokens
        self.vocab_size = len(tokens)

    def all_tokens(self) -> Iterable[Tuple[bytes, float]]:
        return self.tokens

    def __repr__(self) -> str:
        return f"<GGMLVocab with {self.vocab_size} tokens>"


Vocab = Union[SentencePieceVocab, GGMLVocab]
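

# Re-order the rows of an attention q/k projection matrix from the interleaved
# layout used by Hugging Face checkpoints to the layout expected by the original
# LLaMA code (and hence by GGML).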
def permute(weights: NDArray, n_head: int) -> NDArray:
    return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
                   .swapaxes(1, 2)
                   .reshape(weights.shape))
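

# Dequantize a Q4 tensor back to float32: each 4-bit weight is unpacked and
# combined with its group's scale (and addend, when present) as
# value = scale * qvalue + addend.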
def dequantize_q4(qvalues_pack32: NDArray, scales: NDArray, addends: Optional[NDArray], g_idx: Optional[NDArray]) -> NDArray:
    # First reinterpret each row from a list of int32s containing 8 values each
    # to a list of uint8s containing 2 values each.
    qvalues_pack8 = qvalues_pack32.view(np.uint8)

    # Then split out the two values per int8 (which requires an actual
    # conversion because numpy doesn't natively support int4s).
    qvalues = np.zeros([qvalues_pack8.shape[0], qvalues_pack8.shape[1] * 2], dtype=np.uint8)
    qvalues[:, 0::2] = qvalues_pack8 & 0xf
    qvalues[:, 1::2] = qvalues_pack8 >> 4

    assert addends is None or addends.shape == scales.shape
    assert qvalues.shape[0] == scales.shape[0]
    assert qvalues.shape[1] % scales.shape[1] == 0
    if g_idx is None:
        repeat_count = qvalues.shape[1] // scales.shape[1]
        scales = scales[:, :, np.newaxis]
        if addends is not None:
            addends = addends[:, :, np.newaxis]
        # Reshape so that the below computation broadcasts over scales and addends:
        qvalues.shape = (qvalues.shape[0], scales.shape[1], int(repeat_count))
    else:
        # In this case the scale and addend is selected for each column by g_idx:
        assert addends is not None
        scales = scales[:, g_idx]
        addends = addends[:, g_idx]
    if addends is None:
        # Q4_0
        qvalues = qvalues.view(np.int8)
        qvalues -= 8
    # And do the actual 'value = scale * qvalue + addend' computation.
    values = scales * qvalues
    if addends is not None:
        values += addends
    if g_idx is None:
        values.shape = (values.shape[0], values.shape[1] * values.shape[2])
    return values


class Tensor(metaclass=ABCMeta):
    data_type: DataType

    @abstractmethod
    def astype(self, data_type: DataType) -> 'Tensor': ...
    @abstractmethod
    def permute(self, n_head: int) -> 'Tensor': ...
    @abstractmethod
    def permute_part(self, n_part: int, n_head: int) -> 'UnquantizedTensor': ...
    @abstractmethod
    def part(self, n_part: int) -> 'UnquantizedTensor': ...
    @abstractmethod
    def to_ggml(self) -> 'GGMLCompatibleTensor': ...


def bf16_to_fp32(bf16_arr: np.ndarray) -> np.ndarray:
    assert bf16_arr.dtype == np.uint16, f"Input array should be of dtype uint16, but got {bf16_arr.dtype}"
    fp32_arr = bf16_arr.astype(np.uint32) << 16
    return fp32_arr.view(np.float32)


class UnquantizedTensor(Tensor):
    def __init__(self, ndarray: NDArray) -> None:
        assert isinstance(ndarray, np.ndarray)
        self.ndarray = ndarray
        self.data_type = NUMPY_TYPE_TO_DATA_TYPE[ndarray.dtype]

    def astype(self, data_type: DataType) -> Tensor:
        dtype = DATA_TYPE_TO_NUMPY[data_type]
        if self.data_type == DT_BF16:
            self.ndarray = bf16_to_fp32(self.ndarray)
        return UnquantizedTensor(self.ndarray.astype(dtype))

    def to_ggml(self) -> 'UnquantizedTensor':
        return self

    def permute_part(self, n_part: int, n_head: int) -> 'UnquantizedTensor':
        r = self.ndarray.shape[0] // 3
        return UnquantizedTensor(permute(self.ndarray[r * n_part : r * n_part + r, ...], n_head))

    def part(self, n_part: int) -> 'UnquantizedTensor':
        r = self.ndarray.shape[0] // 3
        return UnquantizedTensor(self.ndarray[r * n_part : r * n_part + r, ...])

    def permute(self, n_head: int) -> 'UnquantizedTensor':
        return UnquantizedTensor(permute(self.ndarray, n_head))


def load_unquantized(lazy_tensor: 'LazyTensor', expected_dtype: Any = None, convert: bool = False) -> NDArray:
    tensor = lazy_tensor.load()
    assert isinstance(tensor, UnquantizedTensor)

    # double-check:
    actual_shape = list(tensor.ndarray.shape)
    assert actual_shape == lazy_tensor.shape, (actual_shape, lazy_tensor.shape)
    if expected_dtype is not None and expected_dtype != tensor.ndarray.dtype:
        if convert:
            tensor.ndarray = tensor.ndarray.astype(expected_dtype)
        else:
            raise ValueError(f'expected this tensor to have dtype {expected_dtype}, got {tensor.ndarray.dtype}')

    return tensor.ndarray


class GGMLQuantizedTensor(Tensor):
    data_type: QuantizedDataType

    def __init__(self, ndarray: NDArray, shape: List[int], data_type: DataType) -> None:
        rows, columns = shape
        assert data_type in (DT_Q4_1, DT_Q4_0)  # for now
        assert isinstance(data_type, QuantizedDataType)  # redundant, but mypy complains without this
        assert columns % data_type.groupsize == 0
        words_in_block = 6 if data_type == DT_Q4_1 else 5
        self.ndarray = ndarray.view(dtype=np.uint32).reshape((rows, columns // data_type.groupsize, words_in_block))
        self.shape = shape[:]
        self.data_type = data_type

    def astype(self, data_type: DataType) -> Tensor:
        if data_type == self.data_type:
            return self
        scales = self.ndarray[:, :, 0].view(np.float32)
        if self.data_type.have_addends:
            addends = self.ndarray[:, :, 1].view(np.float32)
        else:
            addends = None
        qweights = self.ndarray[:, :, -4:].reshape([self.shape[0], self.shape[1] // 8])
        dq = dequantize_q4(qweights, scales, addends, g_idx=None)
        return UnquantizedTensor(dq).astype(data_type)

    def to_ggml(self) -> 'GGMLQuantizedTensor':
        return self

    def permute(self, n_head: int) -> 'GGMLQuantizedTensor':
        return GGMLQuantizedTensor(permute(self.ndarray, n_head), self.shape, self.data_type)


GGMLCompatibleTensor = Union[UnquantizedTensor, GGMLQuantizedTensor]


class DeferredPermutedTensor(Tensor):
    def __init__(self, base: Tensor, n_head: int) -> None:
        self.base = base
        self.n_head = n_head
        self.data_type = self.base.data_type

    def astype(self, data_type: DataType) -> Tensor:
        return self.base.astype(data_type).permute(self.n_head)

    def to_ggml(self) -> GGMLCompatibleTensor:
        return self.base.to_ggml().permute(self.n_head)

    def permute(self, n_head: int) -> Tensor:
        raise Exception("shouldn't permute twice")
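

# Reassembles the separate qweight / scales / (q)zeros / g_idx entries of a
# GPTQ-for-LLaMa checkpoint into a single quantized tensor; the addends are the
# negated zeros.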
class GPTQForLLaMaQuantizedTensor(Tensor):
    def __init__(self, model: 'LazyModel', namebase: str) -> None:
        qweight = load_unquantized(model[f"{namebase}.qweight"], np.int32)
        scales = load_unquantized(model[f"{namebase}.scales"], np.float32, convert=True)

        bias = model.get(f"{namebase}.bias")
        if bias is not None:
            # Q4_1 does not support bias; good thing the bias is always all zeros.
            assert not np.any(load_unquantized(bias))

        if f"{namebase}.zeros" in model:
            zeros = load_unquantized(model[f"{namebase}.zeros"], np.float32)
        else:
            qzeros = load_unquantized(model[f"{namebase}.qzeros"], np.int32)
            assert qzeros.dtype == np.int32
            zeros = dequantize_q4(qzeros, scales, scales, g_idx=None)
            assert zeros.dtype == np.float32

        assert zeros.shape == scales.shape

        # Output is transposed compared to the input, and addends have their sign flipped.
        # Scales and zeros similarly must be transposed but only for newer
        # versions of GPTQ-for-LLaMa; the older versions can be identified by
        # having shape (n_embd, 1).
        qweight = qweight.T
        if scales.shape[1] != 1:
            scales = scales.T
            zeros = zeros.T

        # Output also has signs flipped for the addends.
        self.qweight = qweight
        self.scales = scales
        self.addends = -zeros

        self.g_idx: Optional[NDArray]
        if f"{namebase}.g_idx" in model:
            self.g_idx = load_unquantized(model[f"{namebase}.g_idx"], np.int32)
            assert self.g_idx.shape == (qweight.shape[1] * 8,)
        else:
            self.g_idx = None

        self.shape = [self.qweight.shape[0], self.qweight.shape[1] * 8]
        self.data_type = QuantizedDataType(groupsize=self.groupsize(), have_addends=True,
                                           have_g_idx=(self.g_idx is not None))

    def inspect(self, row: int, col: int) -> None:
        '''For debugging.'''
        qweight = (self.qweight[row, col // 8] >> (4 * (col & 7))) & 0xf
        if self.g_idx is not None:
            group = self.g_idx[col]
        else:
            group = int(col // self.groupsize())
        scale = self.scales[row, group]
        addend = self.addends[row, group]
        with np.printoptions(precision=None, suppress=True):
            print(f'scale:{scale} addend:{addend} qweight:{qweight}')
            print('possible values:', np.arange(16) * scale + addend)
            print('actual value:', qweight * scale + addend)

    def astype(self, data_type: DataType) -> Tensor:
        if isinstance(data_type, QuantizedDataType):
            assert self.g_idx is None and data_type.have_addends is True and data_type.have_g_idx is False
            return self.regroup(data_type.groupsize)

        dequantized = dequantize_q4(np.ascontiguousarray(self.qweight), self.scales, self.addends, self.g_idx)
        return UnquantizedTensor(dequantized).astype(data_type)

    def groupsize(self) -> int:
        assert self.addends.shape == self.scales.shape
        assert self.shape[1] % self.scales.shape[1] == 0
        return self.shape[1] // self.scales.shape[1]

    def regroup(self, new_groupsize: int = 32) -> 'GPTQForLLaMaQuantizedTensor':
        # Old versions of GPTQ-for-LLaMa shared scales and addends between all the
        # columns in a row.  Newer versions share them between every set of N
        # columns in a row, where N is the `groupsize` parameter, usually 128.  The
        # output format shares them between every set of 32 columns.  To handle
        # this, duplicate scales and addends for every smaller group.
        # (In the above, 'row' and 'column' are in the sense of the output.)
        assert self.g_idx is None
        old_groupsize = self.groupsize()
        assert old_groupsize >= new_groupsize and old_groupsize % new_groupsize == 0, old_groupsize
        ret = copy.copy(self)
        ret.addends = self.addends.repeat(old_groupsize // new_groupsize, axis=1)
        ret.scales = self.scales.repeat(old_groupsize // new_groupsize, axis=1)
        ret.data_type = QuantizedDataType(groupsize=new_groupsize, have_addends=True, have_g_idx=False)
        return ret

    def permute(self, n_head: int) -> Tensor:
        return DeferredPermutedTensor(self, n_head)

    def to_ggml(self) -> GGMLQuantizedTensor:
        # The output format looks like this:
        # For each row:
        #   For each group of 32 columns:
        #     - addend (float32, 4 bytes)
        #     - scale (float32, 4 bytes)
        #     - weights (int4 * 32, 16 bytes)
        if self.groupsize() != 32:
            raise Exception("should have been regrouped before converting to ggml")

        # Since the output format is mixed between integers and floats, we have
        # to hackily view the floats as int32s just so numpy will let us
        # concatenate them.
        addends_view = self.addends.view(dtype=np.int32)[:, :, np.newaxis]
        scales_view = self.scales.view(dtype=np.int32)[:, :, np.newaxis]

        # Split into groups of 4 columns (i.e. 32 columns of quantized data):
        grouped = self.qweight.reshape([self.qweight.shape[0], self.qweight.shape[1] // 4, 4])

        # And concatenate:
        grouped = np.concatenate([scales_view, addends_view, grouped], axis=2, casting='no')

        return GGMLQuantizedTensor(grouped, self.shape, DT_Q4_1)


@dataclass
class LazyTensor:
    _load: Callable[[], Tensor]
    shape: List[int]
    data_type: DataType
    description: str

    def load(self) -> Tensor:
        ret = self._load()
        assert ret.data_type == self.data_type, (self.data_type, ret.data_type, self.description)
        return ret

    def astype(self, data_type: DataType) -> 'LazyTensor':
        self.validate_conversion_to(data_type)

        def load() -> Tensor:
            return self.load().astype(data_type)
        return LazyTensor(load, self.shape, data_type, f'convert({data_type}) {self.description}')

    def validate_conversion_to(self, data_type: DataType) -> None:
        if data_type == self.data_type:
            return
        if isinstance(data_type, QuantizedDataType):
            if not isinstance(self.data_type, QuantizedDataType):
                raise Exception(f"Can't turn an unquantized tensor into a quantized type ({data_type})")
            if self.data_type.have_g_idx:
                sys.stderr.write(
                    "Error: Input uses the newer GPTQ-for-LLaMa format (using g_idx), "
                    "which is not yet natively supported by GGML. "
                    "For now you can still convert this model by passing `--outtype f16` to dequantize, "
                    "but that will result in a much larger output file for no quality benefit.\n")
                sys.exit(1)
            assert not data_type.have_g_idx and self.data_type.have_addends and data_type.have_addends


LazyModel = Dict[str, LazyTensor]


@dataclass
class ModelPlus:
    model: LazyModel
    paths: List[Path]  # Where this was read from.
    format: Literal['ggml', 'torch', 'safetensors']
    vocab: Optional[Vocab]  # For GGML models (which have vocab built in), the vocab.


def merge_sharded(models: List[LazyModel]) -> LazyModel:
    # Original LLaMA models have each file contain one part of each tensor.
    # Use a dict instead of a set to preserve order.
    names = {name: None for model in models for name in model}

    def convert(name: str) -> LazyTensor:
        lazy_tensors: List[LazyTensor] = [model[name] for model in models]
        if len(lazy_tensors) == 1:
            # only one file; don't go through this procedure since there might
            # be quantized tensors
            return lazy_tensors[0]
        if len(lazy_tensors[0].shape) == 1:
            # the tensor is just duplicated in every file
            return lazy_tensors[0]
        if name.startswith('tok_embeddings.') or \
           name.endswith('.attention.wo.weight') or \
           name.endswith('.feed_forward.w2.weight'):
            # split by columns
            axis = 1
        else:
            # split by rows
            axis = 0
        concatenated_shape = list(lazy_tensors[0].shape)
        concatenated_shape[axis] = sum(tensor.shape[axis] for tensor in lazy_tensors)

        def load() -> UnquantizedTensor:
            ndarrays = [load_unquantized(tensor) for tensor in lazy_tensors]
            concatenated: NDArray = np.concatenate(ndarrays, axis=axis)
            return UnquantizedTensor(concatenated)
        description = 'concatenated[[' + '] | ['.join(lt.description for lt in lazy_tensors) + ']]'
        return LazyTensor(load, concatenated_shape, lazy_tensors[0].data_type, description)
    return {name: convert(name) for name in names}


def merge_multifile_models(models_plus: List[ModelPlus]) -> ModelPlus:
    formats = set(mp.format for mp in models_plus)
    assert len(formats) == 1, "different formats?"
    format = formats.pop()
    paths = [path for mp in models_plus for path in mp.paths]
    # Use the first non-None vocab, if any.
    try:
        vocab = next(mp.vocab for mp in models_plus if mp.vocab is not None)
    except StopIteration:
        vocab = None

    if any("model.embed_tokens.weight" in mp.model for mp in models_plus):
        # Transformers models put different tensors in different files, but
        # don't split individual tensors between files.
        model: LazyModel = {}
        for mp in models_plus:
            model.update(mp.model)
    else:
        model = merge_sharded([mp.model for mp in models_plus])

    return ModelPlus(model, paths, format, vocab)


def permute_lazy(lazy_tensor: LazyTensor, n_head: int) -> LazyTensor:
    def load() -> Tensor:
        return lazy_tensor.load().permute(n_head)
    return LazyTensor(load, lazy_tensor.shape, lazy_tensor.data_type, f'permute({n_head}) ' + lazy_tensor.description)


def permute_part_lazy(lazy_tensor: LazyTensor, n_part: int, n_head: int) -> LazyTensor:
    def load() -> Tensor:
        return lazy_tensor.load().permute_part(n_part, n_head)
    s = lazy_tensor.shape.copy()
    s[0] = s[0] // 3
    return LazyTensor(load, s, lazy_tensor.data_type, f'permute({n_head}) ' + lazy_tensor.description)


def part_lazy(lazy_tensor: LazyTensor, n_part: int) -> LazyTensor:
    def load() -> Tensor:
        return lazy_tensor.load().part(n_part)
    s = lazy_tensor.shape.copy()
    s[0] = s[0] // 3
    return LazyTensor(load, s, lazy_tensor.data_type, 'part ' + lazy_tensor.description)


def convert_transformers_to_orig(model: LazyModel, params: Params) -> LazyModel:
    out: LazyModel = {}
    out["tok_embeddings.weight"] = model["model.embed_tokens.weight"]
    out["norm.weight"] = model["model.norm.weight"]
    out["output.weight"] = model["lm_head.weight"]

    for i in itertools.count():
        if f"model.layers.{i}.self_attn.q_proj.weight" in model:
            out[f"layers.{i}.attention.wq.weight"] = permute_lazy(model[f"model.layers.{i}.self_attn.q_proj.weight"], params.n_head)
            out[f"layers.{i}.attention.wk.weight"] = permute_lazy(model[f"model.layers.{i}.self_attn.k_proj.weight"], params.n_head)
            out[f"layers.{i}.attention.wv.weight"] = model[f"model.layers.{i}.self_attn.v_proj.weight"]
        elif f"model.layers.{i}.self_attn.W_pack.weight" in model:
            out[f"layers.{i}.attention.wq.weight"] = permute_part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 0, params.n_head)
            out[f"layers.{i}.attention.wk.weight"] = permute_part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 1, params.n_head)
            out[f"layers.{i}.attention.wv.weight"] = part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 2)
        else:
            break

        out[f"layers.{i}.attention.wo.weight"] = model[f"model.layers.{i}.self_attn.o_proj.weight"]

        out[f"layers.{i}.feed_forward.w1.weight"] = model[f"model.layers.{i}.mlp.gate_proj.weight"]
        out[f"layers.{i}.feed_forward.w2.weight"] = model[f"model.layers.{i}.mlp.down_proj.weight"]
        out[f"layers.{i}.feed_forward.w3.weight"] = model[f"model.layers.{i}.mlp.up_proj.weight"]

        out[f"layers.{i}.attention_norm.weight"] = model[f"model.layers.{i}.input_layernorm.weight"]
        out[f"layers.{i}.ffn_norm.weight"] = model[f"model.layers.{i}.post_attention_layernorm.weight"]
    return out


def handle_quantization(model: LazyModel) -> LazyModel:
    '''Convert a model with entries for 'foo.qweight', 'foo.scales', etc.
    (which resolve to UnquantizedTensors with the raw data) to one with entries
    for 'foo.weight' (which resolve to QuantizedTensors).
    '''
    def convert(name: str) -> Tuple[str, LazyTensor]:
        if name.endswith(".qweight"):
            namebase = name.rsplit('.', 1)[0]
            orig_name = namebase + ".weight"

            lazy_tensor = model[name]
            assert len(lazy_tensor.shape) == 2
            real_shape = [lazy_tensor.shape[1], lazy_tensor.shape[0] * 8]

            # Calculate type.  This replicates the logic in
            # GPTQForLLaMaQuantizedTensor (which is executed when the model is
            # actually loaded).
            lazy_scales = model[f"{namebase}.scales"]
            scales_width = 1 if lazy_scales.shape[1] == 1 else lazy_scales.shape[0]
            assert real_shape[1] % scales_width == 0
            groupsize = real_shape[1] // scales_width
            have_g_idx = f"{namebase}.g_idx" in model
            data_type = QuantizedDataType(groupsize=groupsize, have_addends=True, have_g_idx=have_g_idx)

            def load() -> Tensor:
                return GPTQForLLaMaQuantizedTensor(model, namebase)
            return (orig_name, LazyTensor(load, real_shape, data_type, '[quantized]'))
        else:
            return (name, model[name])
    return dict(convert(name) for name in model)


# Functionality that simulates `torch.load` but where individual tensors are
# only loaded into memory on demand, not all at once.
# PyTorch can't do this natively as of time of writing:
# - https://github.com/pytorch/pytorch/issues/64327
# This allows us to de-shard without multiplying RAM usage, and also
# conveniently drops the PyTorch dependency (though we still need numpy).


@dataclass
class LazyStorageKind:
    data_type: DataType


@dataclass
class LazyStorage:
    load: Callable[[int, int], NDArray]
    kind: LazyStorageKind
    description: str


class LazyUnpickler(pickle.Unpickler):
    def __init__(self, fp: IO[bytes], data_base_path: str, zip_file: zipfile.ZipFile):
        super().__init__(fp)
        self.data_base_path = data_base_path
        self.zip_file = zip_file

    def persistent_load(self, pid: Any) -> Any:
        assert pid[0] == 'storage'
        assert isinstance(pid[1], LazyStorageKind)
        data_type = pid[1].data_type
        filename_stem = pid[2]
        filename = self.data_base_path + '/' + filename_stem
        info = self.zip_file.getinfo(filename)

        def load(offset: int, elm_count: int) -> NDArray:
            dtype = DATA_TYPE_TO_NUMPY.get(data_type)
            if dtype is None:
                raise Exception("tensor stored in unsupported format")
            fp = self.zip_file.open(info)
            fp.seek(offset * dtype.itemsize)
            size = elm_count * dtype.itemsize
            data = fp.read(size)
            assert len(data) == size
            return np.frombuffer(data, dtype)
        description = f'storage data_type={data_type} path-in-zip={filename} path={self.zip_file.filename}'
        return LazyStorage(load=load, kind=pid[1], description=description)

    # @staticmethod
    def lazy_rebuild_tensor_v2(storage: Any, storage_offset: Any, size: Any, stride: Any,
                               # pyright: ignore[reportSelfClsParameterName]
                               requires_grad: Any, backward_hooks: Any, metadata: Any = None) -> LazyTensor:
        assert isinstance(storage, LazyStorage)

        def load() -> UnquantizedTensor:
            elm_count = stride[0] * size[0]
            return UnquantizedTensor(storage.load(storage_offset, elm_count).reshape(size))
        description = f'pickled storage_offset={storage_offset} in {storage.description}'
        return LazyTensor(load, list(size), storage.kind.data_type, description)

    # @staticmethod
    def rebuild_from_type_v2(func, new_type, args, state):
        return func(*args)

    CLASSES: Dict[Any, Any] = {
        ('torch._tensor', '_rebuild_from_type_v2'): rebuild_from_type_v2,
        ('torch._utils', '_rebuild_tensor_v2'): lazy_rebuild_tensor_v2,
        ('torch', 'BFloat16Storage'): LazyStorageKind(DT_BF16),
        ('torch', 'HalfStorage'): LazyStorageKind(DT_F16),
        ('torch', 'FloatStorage'): LazyStorageKind(DT_F32),
        ('torch', 'IntStorage'): LazyStorageKind(DT_I32),
        ('torch', 'Tensor'): LazyTensor,
    }

    def find_class(self, module: str, name: str) -> Any:
        if not module.startswith('torch'):
            return super().find_class(module, name)
        return self.CLASSES[(module, name)]


def lazy_load_torch_file(outer_fp: IO[bytes], path: Path) -> ModelPlus:
    zf = zipfile.ZipFile(outer_fp)
    pickle_paths = [name for name in zf.namelist() if name.endswith('.pkl')]
    assert len(pickle_paths) == 1, pickle_paths
    pickle_fp = zf.open(pickle_paths[0], 'r')
    unpickler = LazyUnpickler(pickle_fp,
                              data_base_path=pickle_paths[0][:-4],
                              zip_file=zf)
    model = unpickler.load()
    as_dict = dict(model.items())
    return ModelPlus(model=as_dict, paths=[path], format='torch', vocab=None)


SAFETENSORS_DATA_TYPES: Dict[str, DataType] = {
    'BF16': DT_BF16,
    'F16': DT_F16,
    'F32': DT_F32,
    'I32': DT_I32,
}


def lazy_load_safetensors_file(fp: IO[bytes], path: Path) -> ModelPlus:
    header_size, = struct.unpack('<Q', fp.read(8))
    header: Dict[str, Dict[str, Any]] = json.loads(fp.read(header_size))
    # Use mmap for the actual data to avoid race conditions with the file offset.
    mapped = memoryview(mmap.mmap(fp.fileno(), 0, access=mmap.ACCESS_READ))
    byte_buf = mapped[8 + header_size:]

    def convert(info: Dict[str, Any]) -> LazyTensor:
        data_type = SAFETENSORS_DATA_TYPES[info['dtype']]
        numpy_dtype = DATA_TYPE_TO_NUMPY[data_type]
        shape: List[int] = info['shape']
        begin, end = info['data_offsets']
        assert 0 <= begin <= end <= len(byte_buf)
        assert end - begin == math.prod(shape) * numpy_dtype.itemsize
        buf = byte_buf[begin:end]

        def load() -> UnquantizedTensor:
            return UnquantizedTensor(np.frombuffer(buf, dtype=numpy_dtype).reshape(shape))
        description = f'safetensors begin={begin} end={end} type={data_type} path={path}'
        return LazyTensor(load, shape, data_type, description)
    model = {name: convert(info) for (name, info) in header.items() if name != '__metadata__'}
    return ModelPlus(model=model, paths=[path], format='safetensors', vocab=None)


def must_read(fp: IO[bytes], length: int) -> bytes:
    ret = fp.read(length)
    if len(ret) < length:
        raise Exception("unexpectedly reached end of file")
    return ret
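

# Parse the legacy GGML container formats: plain 'ggml' files carry no version
# field and no per-token scores, 'ggmf'/'ggjt' add a version field and scores,
# and 'ggjt' additionally aligns each tensor's data to a 32-byte boundary.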
def lazy_load_ggml_file(fp: io.BufferedReader, path: Path) -> ModelPlus:
    magic = must_read(fp, 4)[::-1]
    if magic in (b'ggmf', b'ggjt'):
        version, = struct.unpack("i", must_read(fp, 4))
        assert version == 1
    else:
        assert magic == b'ggml'
        version = None
    n_vocab, n_embd, n_mult, n_head, n_layer, rot, file_type = struct.unpack('<7i', must_read(fp, 28))

    tokens: List[Tuple[bytes, float]] = []
    for i in range(n_vocab):
        if i == 32000:
            # HACK: GPT4All messed with the format without changing the magic
            # number.  Specifically, they changed the vocab section to contain
            # `n_vocab - 1` tokens instead of `n_vocab` (i.e. omitting the
            # extra pad token).  Try to detect if we're reading a file like
            # this.
            orig_pos = fp.tell()
            fp.seek(20, io.SEEK_CUR)
            is_gpt4all = fp.read(21) == b'tok_embeddings.weight'
            fp.seek(orig_pos)
            if is_gpt4all:
                break

        length, = struct.unpack("i", must_read(fp, 4))
        text = must_read(fp, length)
        if magic != b'ggml':
            score, = struct.unpack("f", must_read(fp, 4))
            tokens.append((text, score))
    vocab = GGMLVocab(tokens) if magic != b'ggml' else None

    model: LazyModel = {}
    # Use mmap for the actual data to avoid race conditions with the file offset.
    off = fp.raw.tell()
    mapped = memoryview(mmap.mmap(fp.fileno(), 0, access=mmap.ACCESS_READ))
    fp.raw.seek(off)  # needed on Windows

    def read_tensor() -> None:  # this is a function so that variables captured in `load` don't change
        shape_len, name_len, ftype = struct.unpack("iii", must_read(fp, 12))
        assert 0 <= shape_len <= 3
        shape: List[int] = list(struct.unpack(f"{shape_len}i", must_read(fp, 4 * shape_len)))
        shape = shape[::-1]
        name = must_read(fp, name_len).decode('utf-8')
        data_type = FTYPE_TO_DATA_TYPE[ftype]

        if magic == b'ggjt':
            fp.seek((fp.tell() + 31) & -32)

        if data_type == DT_Q4_1:
            # See GPTQForLLaMaQuantizedTensor.ggml_ndarray()
            size = 24 * (shape[1] // 32) * shape[0]
        elif data_type == DT_Q4_0:
            size = 20 * (shape[1] // 32) * shape[0]
        else:
            numpy_dtype = DATA_TYPE_TO_NUMPY[data_type]
            elm_count = math.prod(shape)
            size = elm_count * numpy_dtype.itemsize
        offset = fp.tell()
        buf = mapped[offset:offset+size]
        fp.seek(size, io.SEEK_CUR)

        def load() -> Tensor:
            if isinstance(data_type, QuantizedDataType):
                ndarray = np.frombuffer(buf, dtype=np.uint32)
                return GGMLQuantizedTensor(ndarray, shape, data_type)
            else:
                return UnquantizedTensor(np.frombuffer(buf, dtype=numpy_dtype).reshape(shape))
        description = f'ggml offset={offset} type={data_type} path={path}'
        model[name] = LazyTensor(load, shape, data_type, description)

    while fp.read(1) != b'':
        fp.seek(-1, io.SEEK_CUR)
        read_tensor()

    return ModelPlus(model=model, paths=[path], format='ggml', vocab=vocab)


@functools.lru_cache(maxsize=None)
def lazy_load_file(path: Path) -> ModelPlus:
    fp = open(path, 'rb')
    first8 = fp.read(8)
    fp.seek(0)
    if first8[:2] == b'PK':
        # A zip file, i.e. PyTorch format
        return lazy_load_torch_file(fp, path)
    elif first8[2:4] == b'gg':
        # GGML format
        return lazy_load_ggml_file(fp, path)
    elif struct.unpack('<Q', first8)[0] < 16 * 1024 * 1024:
        # Probably safetensors
        return lazy_load_safetensors_file(fp, path)
    else:
        raise ValueError(f"unknown format: {path}")


In = TypeVar('In')
Out = TypeVar('Out')


def bounded_parallel_map(func: Callable[[In], Out], iterable: Iterable[In], concurrency: int) -> Iterable[Out]:
    '''Parallel map, but with backpressure.  If the caller doesn't call `next`
    fast enough, this will stop calling `func` at some point rather than
    letting results pile up in memory.  Specifically, there is a max of one
    output value buffered per thread.'''
    with concurrent.futures.ThreadPoolExecutor() as executor:
        futures: List[concurrent.futures.Future[Out]] = []
        items_rev = list(iterable)[::-1]
        for i in range(min(concurrency, len(items_rev))):
            futures.append(executor.submit(func, items_rev.pop()))
        while futures:
            result = futures.pop(0).result()
            if items_rev:
                futures.append(executor.submit(func, items_rev.pop()))
            yield result


def check_vocab_size(params: Params, vocab: Vocab) -> None:
    if params.n_vocab != vocab.vocab_size:
        # GGMLVocab comes from the same file as the model so shouldn't mismatch:
        assert isinstance(vocab, SentencePieceVocab)
        if params.n_vocab == vocab.vocab_size_base:
            print("Ignoring added_tokens.json since model matches vocab size without it.")
            vocab.added_tokens_list = []
            vocab.vocab_size = vocab.vocab_size_base
            return
        msg = f"Vocab size mismatch (model has {params.n_vocab}, but {vocab.fname_tokenizer}"
        if vocab.fname_added_tokens is not None:
            msg += f" combined with {vocab.fname_added_tokens}"
        msg += f" has {vocab.vocab_size})."
        if vocab.vocab_size < params.n_vocab < vocab.vocab_size + 20 and vocab.fname_added_tokens is None:
            msg += f"  Most likely you are missing added_tokens.json (should be in {vocab.fname_tokenizer.parent})."
        raise Exception(msg)
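

# Writes the ggjt output file: file header, vocab, then each tensor header
# followed by its data, with tensor data aligned to 32 bytes.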
class OutputFile:
    def __init__(self, fname_out: Path) -> None:
        self.fout = open(fname_out, "wb")

    def write_file_header(self, params: Params, file_type: GGMLFileType) -> None:
        self.fout.write(b"ggjt"[::-1])  # magic
        values = [
            1,  # file version
            params.n_vocab,
            params.n_embd,
            params.n_mult,
            params.n_head,
            params.n_layer,
            params.n_embd // params.n_head,  # rot (obsolete)
            file_type.value,
        ]
        self.fout.write(struct.pack("i" * len(values), *values))

    def write_tensor_header(self, name: str, shape: Sequence[int], data_type: DataType) -> None:
        sname = name.encode('utf-8')
        self.fout.write(struct.pack("iii", len(shape), len(sname), DATA_TYPE_TO_FTYPE[data_type]))
        self.fout.write(struct.pack("i" * len(shape), *shape[::-1]))
        self.fout.write(sname)
        self.fout.seek((self.fout.tell() + 31) & -32)

    def write_vocab(self, vocab: Vocab) -> None:
        for text, score in vocab.all_tokens():
            self.fout.write(struct.pack("i", len(text)))
            self.fout.write(text)
            self.fout.write(struct.pack("f", score))

    @staticmethod
    def write_vocab_only(fname_out: Path, vocab: Vocab) -> None:
        params = Params(n_vocab=vocab.vocab_size, n_embd=0, n_mult=0, n_head=1, n_layer=0)
        of = OutputFile(fname_out)
        of.write_file_header(params, file_type=GGMLFileType.AllF32)
        of.write_vocab(vocab)
        of.fout.close()

    @staticmethod
    def write_all(fname_out: Path, params: Params, file_type: GGMLFileType, model: LazyModel, vocab: Vocab) -> None:
        check_vocab_size(params, vocab)
        of = OutputFile(fname_out)
        of.write_file_header(params, file_type)
        print("Writing vocab...")
        of.write_vocab(vocab)

        def do_item(item: Tuple[str, LazyTensor]) -> NDArray:
            name, lazy_tensor = item
            return lazy_tensor.load().to_ggml().ndarray

        ndarrays = bounded_parallel_map(do_item, model.items(), concurrency=8)
        for i, ((name, lazy_tensor), ndarray) in enumerate(zip(model.items(), ndarrays)):
            size = ' x '.join(f"{dim:6d}" for dim in lazy_tensor.shape)
            padi = len(str(len(model)))
            print(f"[{i+1:{padi}d}/{len(model)}] Writing tensor {name:38s} | size {size:16} | type {lazy_tensor.data_type}")
            of.write_tensor_header(name, lazy_tensor.shape, lazy_tensor.data_type)
            ndarray.tofile(of.fout)
        of.fout.close()
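

# Pick the output file type from --outtype when given, otherwise from the dtype
# of the first attention weight.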
def pick_output_type(model: LazyModel, output_type_str: Optional[str]) -> GGMLFileType:
    wq_type = model["layers.0.attention.wq.weight"].data_type
    if output_type_str == "f32" or (output_type_str is None and wq_type in (DT_F32, DT_BF16)):
        return GGMLFileType.AllF32
    if output_type_str == "f16" or (output_type_str is None and wq_type == DT_F16):
        return GGMLFileType.MostlyF16
    if output_type_str == "q4_1" or (output_type_str is None and isinstance(wq_type, QuantizedDataType) and
                                     wq_type.have_addends):
        if isinstance(model["output.weight"].data_type, QuantizedDataType):
            return GGMLFileType.MostlyQ4_1
        else:
            return GGMLFileType.PerLayerIsQ4_1
    if output_type_str == "q4_0" or (output_type_str is None and isinstance(wq_type, QuantizedDataType)):
        return GGMLFileType.MostlyQ4_0
    name_to_type = {name: lazy_tensor.data_type for (name, lazy_tensor) in model.items()}
    raise Exception(f"Unexpected combination of types: {name_to_type}")


def do_necessary_conversions(model: LazyModel, params: Params) -> LazyModel:
    model = handle_quantization(model)

    if "lm_head.weight" in model:
        model = convert_transformers_to_orig(model, params)
    model = filter_and_sort_tensors(model)

    return model


def convert_to_output_type(model: LazyModel, output_type: GGMLFileType) -> LazyModel:
    return {name: tensor.astype(output_type.type_for_tensor(name, tensor))
            for (name, tensor) in model.items()}


def nth_multifile_path(path: Path, n: int) -> Optional[Path]:
    '''Given any path belonging to a multi-file model (e.g. foo.bin.1), return
    the nth path in the model.
    '''
    # Support the following patterns:
    patterns: List[Tuple[str, str]] = [
        # - x.00.pth, x.01.pth, etc.
        (r'\.[0-9]{2}\.pth$', f'.{n:02}.pth'),
        # - x-00001-of-00002.bin, x-00002-of-00002.bin, etc.
        (r'-[0-9]{5}-of-(.*)$', fr'-{n:05}-of-\1'),
        # x.bin, x.bin.1, etc.
        (r'(\.[0-9]+)?$', r'\1' if n == 0 else fr'\1.{n}')
    ]
    for regex, replacement in patterns:
        if re.search(regex, path.name):
            new_path = path.with_name(re.sub(regex, replacement, path.name))
            if new_path.exists():
                return new_path
    return None


def find_multifile_paths(path: Path) -> List[Path]:
    '''Given any path belonging to a multi-file model (e.g. foo.bin.1), return
    the whole list of paths in the model.
    '''
    ret: List[Path] = []
    for i in itertools.count():
        nth_path = nth_multifile_path(path, i)
        if nth_path is None:
            break
        ret.append(nth_path)
    if not ret:
        # No matches.  This should only happen if the file was named, e.g.,
        # foo.0, and there was no file named foo.  Oh well, try to process it
        # as a single file.
        return [path]
    return ret


def load_some_model(path: Path) -> ModelPlus:
    '''Load a model of any supported format.'''
    # Be extra-friendly and accept either a file or a directory:
    if path.is_dir():
        # Check if it's a set of safetensors files first
        files = list(path.glob("model-00001-of-*.safetensors"))
        if not files:
            # Try the PyTorch patterns too, with lower priority
            globs = ["consolidated.00.pth", "pytorch_model-00001-of-*.bin", "*.pt", "pytorch_model.bin"]
            files = [file for glob in globs for file in path.glob(glob)]
        if not files:
            # Try GGML too, but with lower priority, since if both a non-GGML
            # model and a GGML model exist in the same directory, we assume the
            # latter was converted from the former.
            files = list(path.glob("ggml-model*.bin*"))
        if not files:
            raise Exception(f"Can't find model in directory {path}")
        if len(files) > 1:
            raise Exception(f"Found multiple models in {path}, not sure which to pick: {files}")
        path = files[0]

    paths = find_multifile_paths(path)
    models_plus: List[ModelPlus] = []
    for path in paths:
        print(f"Loading model file {path}")
        models_plus.append(lazy_load_file(path))

    model_plus = merge_multifile_models(models_plus)
    return model_plus


def filter_and_sort_tensors(model: LazyModel) -> LazyModel:
    return {name: model[name] for name in TENSORS_LIST if name in model}


def load_vocab(path: Path, vocabtype: Optional[str]) -> SentencePieceVocab:
    print(f"vocabtype: {vocabtype}")
    # Be extra-friendly and accept either a file or a directory.  Also, if it's
    # a directory, it might be the model directory, and tokenizer.model might
    # be in the parent of that.
    if path.is_dir():
        vocab_file = "tokenizer.model"
        if vocabtype == 'bpe':
            vocab_file = "vocab.json"
        path2 = path / vocab_file
        # Use `.parent` instead of /.. to handle the symlink case better.
        path3 = path.parent / vocab_file
        if path2.exists():
            path = path2
        elif path3.exists():
            path = path3
        else:
            raise FileNotFoundError(
                f"Could not find tokenizer.model in {path} or its parent; "
                "if it's in another directory, pass the directory as --vocab-dir")
    added_tokens_path = path.parent / "added_tokens.json"
    print(f"Loading vocab file {path}")
    return SentencePieceVocab(path, added_tokens_path if added_tokens_path.exists() else None,
                              vocabtype)


def default_outfile(model_paths: List[Path], file_type: GGMLFileType) -> Path:
    namestr = {
        GGMLFileType.AllF32: "f32",
        GGMLFileType.MostlyF16: "f16",
        GGMLFileType.MostlyQ4_0: "q4_0",
        GGMLFileType.MostlyQ4_1: "q4_1",
        GGMLFileType.PerLayerIsQ4_1: "q4_1",
    }[file_type]
    ret = model_paths[0].parent / f"ggml-model-{namestr}.bin"
    if ret in model_paths:
        sys.stderr.write(
            f"Error: Default output path ({ret}) would overwrite the input. "
            "Please explicitly specify a path using --outfile.\n")
        sys.exit(1)
    return ret


def do_dump_model(model_plus: ModelPlus) -> None:
    print(f"model_plus.paths = {model_plus.paths!r}")
    print(f"model_plus.format = {model_plus.format!r}")
    print(f"model_plus.vocab = {model_plus.vocab!r}")
    for name, lazy_tensor in model_plus.model.items():
        print(f"{name}: shape={lazy_tensor.shape} type={lazy_tensor.data_type}; {lazy_tensor.description}")


def main(args_in: Optional[List[str]] = None) -> None:
    parser = argparse.ArgumentParser(description="Convert a LLaMa model to a GGML compatible file")
    parser.add_argument("--dump", action="store_true", help="don't convert, just show what's in the model")
    parser.add_argument("--dump-single", action="store_true", help="don't convert, just show what's in a single model file")
    parser.add_argument("--vocab-only", action="store_true", help="extract only the vocab")
    parser.add_argument("--outtype", choices=["f32", "f16", "q4_1", "q4_0"], help="output format (default: based on input)")
    parser.add_argument("--vocab-dir", type=Path, help="directory containing tokenizer.model, if separate from model file")
    parser.add_argument("--outfile", type=Path, help="path to write to; default: based on input")
    parser.add_argument("model", type=Path,
                        help="directory containing model file, or model file itself (*.pth, *.pt, *.bin)")
    parser.add_argument("--vocabtype", default='spm', choices=["spm", "bpe"], help="vocab format (default: spm)")
    args = parser.parse_args(args_in)

    vocab: Vocab
    if args.dump_single:
        model_plus = lazy_load_file(args.model)
        do_dump_model(model_plus)
    elif args.vocab_only:
        vocab = load_vocab(args.vocab_dir or args.model, args.vocabtype)
        assert args.outfile, "need --outfile if using --vocab-only"
        outfile = args.outfile
        OutputFile.write_vocab_only(outfile, vocab)
        print(f"Wrote {outfile}")
    else:
        model_plus = load_some_model(args.model)
        if args.dump:
            do_dump_model(model_plus)
            return
        if model_plus.vocab is not None and args.vocab_dir is None:
            vocab = model_plus.vocab
        else:
            vocab_dir = args.vocab_dir if args.vocab_dir else model_plus.paths[0].parent
            vocab = load_vocab(vocab_dir, args.vocabtype)
        params = Params.load(model_plus)
        model = model_plus.model
        model = do_necessary_conversions(model, params)
        output_type = pick_output_type(model, args.outtype)
        model = convert_to_output_type(model, output_type)
        outfile = args.outfile or default_outfile(model_plus.paths, output_type)
        OutputFile.write_all(outfile, params, output_type, model, vocab)
        print(f"Wrote {outfile}")


if __name__ == '__main__':
    main()