import argparse
import concurrent.futures
import copy
import enum
import faulthandler
import functools
import io
import itertools
import json
import math
import mmap
import pickle
import re
import signal
import struct
import sys
import zipfile
from abc import ABCMeta, abstractmethod
from dataclasses import dataclass
from pathlib import Path
from typing import (IO, TYPE_CHECKING, Any, Callable, Dict, Iterable, List,
                    Literal, Optional, Sequence, Tuple, TypeVar, Union)

import numpy as np
from sentencepiece import SentencePieceProcessor  # type: ignore

if TYPE_CHECKING:
    from typing_extensions import TypeAlias

if hasattr(faulthandler, 'register') and hasattr(signal, 'SIGUSR1'):
    faulthandler.register(signal.SIGUSR1)

NDArray: 'TypeAlias' = 'np.ndarray[Any, Any]'


@dataclass(frozen=True)
class UnquantizedDataType:
    name: str


DT_F16 = UnquantizedDataType('F16')
DT_F32 = UnquantizedDataType('F32')
DT_I32 = UnquantizedDataType('I32')
DT_BF16 = UnquantizedDataType('BF16')


@dataclass(frozen=True)
class QuantizedDataType:
    groupsize: int
    have_addends: bool
    have_g_idx: bool


DT_Q4_0 = QuantizedDataType(groupsize=32, have_addends=False, have_g_idx=False)
DT_Q4_1 = QuantizedDataType(groupsize=32, have_addends=True, have_g_idx=False)

DataType = Union[UnquantizedDataType, QuantizedDataType]

DATA_TYPE_TO_FTYPE: Dict[DataType, int] = {
    DT_F32: 0,
    DT_F16: 1,
    DT_Q4_0: 2,
    DT_Q4_1: 3,
}

FTYPE_TO_DATA_TYPE: Dict[int, DataType] = \
    {ftype: dtype for (dtype, ftype) in DATA_TYPE_TO_FTYPE.items()}

DATA_TYPE_TO_NUMPY: Dict[DataType, 'np.dtype[Any]'] = {
    DT_BF16: np.dtype(np.uint16),
    DT_F16: np.dtype(np.float16),
    DT_F32: np.dtype(np.float32),
    DT_I32: np.dtype(np.int32),
}

NUMPY_TYPE_TO_DATA_TYPE: Dict['np.dtype[Any]', DataType] = \
    {dtype: data_type for (data_type, dtype) in DATA_TYPE_TO_NUMPY.items()}


class GGMLFileType(enum.Enum):
    AllF32 = 0
    MostlyF16 = 1       # except 1d tensors
    MostlyQ4_0 = 2      # except 1d tensors
    MostlyQ4_1 = 3      # except 1d tensors
    PerLayerIsQ4_1 = 4  # but tok_embeddings.weight and output.weight are F16

    def type_for_tensor(self, name: str, tensor: 'LazyTensor') -> DataType:
        if len(tensor.shape) == 1:
            # 1D tensors are always F32.
            return DT_F32
        elif self == GGMLFileType.AllF32:
            return DT_F32
        elif self == GGMLFileType.MostlyF16:
            return DT_F16
        elif self == GGMLFileType.MostlyQ4_0:
            return DT_Q4_0
        elif self == GGMLFileType.MostlyQ4_1:
            return DT_Q4_1
        elif self == GGMLFileType.PerLayerIsQ4_1:
            if name in ('output.weight', 'tok_embeddings.weight'):
                return DT_F16
            else:
                return DT_Q4_1
        else:
            raise ValueError(self)


def make_tensors_list() -> List[str]:
    ret = [
        'tok_embeddings.weight',
        'norm.weight',
        'output.weight',
    ]
    for i in range(80):  # maximum number of layers
        ret += [
            f'layers.{i}.attention.wq.weight',
            f'layers.{i}.attention.wk.weight',
            f'layers.{i}.attention.wv.weight',
            f'layers.{i}.attention.wo.weight',
            f'layers.{i}.attention_norm.weight',
            f'layers.{i}.feed_forward.w1.weight',
            f'layers.{i}.feed_forward.w2.weight',
            f'layers.{i}.feed_forward.w3.weight',
            f'layers.{i}.ffn_norm.weight',
        ]
    return ret


TENSORS_LIST = make_tensors_list()
TENSORS_SET = set(TENSORS_LIST)


def find_n_mult(n_ff: int, n_embd: int) -> int:
    # hardcoded magic range
    for n_mult in range(256, 1, -1):
        calc_ff = (((8 * n_embd) // 3 + n_mult - 1) // n_mult) * n_mult
        if calc_ff == n_ff:
            return n_mult
    raise Exception(f"failed to find n_mult for (n_ff={n_ff}, n_embd={n_embd}).")
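

# Worked example (illustrative only, not called anywhere): for LLaMA-7B,
# n_embd = 4096 and n_ff = 11008. With n_mult = 256:
#   (8 * 4096) // 3 = 10922
#   ((10922 + 256 - 1) // 256) * 256 = 11008 == n_ff
# so find_n_mult(11008, 4096) returns 256 on its first iteration.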


@dataclass
class Params:
    n_vocab: int
    n_embd: int
    n_mult: int
    n_head: int
    n_layer: int

    @staticmethod
    def guessed(model: 'LazyModel') -> 'Params':
        # try transformer naming first
        n_vocab, n_embd = model["model.embed_tokens.weight"].shape if "model.embed_tokens.weight" in model else model["tok_embeddings.weight"].shape

        # try transformer naming first for the layer count
        if "model.layers.0.self_attn.q_proj.weight" in model:
            n_layer = next(i for i in itertools.count() if f"model.layers.{i}.self_attn.q_proj.weight" not in model)
        elif "model.layers.0.self_attn.W_pack.weight" in model:
            # next: try baichuan naming
            n_layer = next(i for i in itertools.count() if f"model.layers.{i}.self_attn.W_pack.weight" not in model)
        else:
            n_layer = next(i for i in itertools.count() if f"layers.{i}.attention.wq.weight" not in model)

        if n_layer < 1:
            raise Exception("failed to guess 'n_layer'. This model is unknown or unsupported.\n"
                            "Suggestion: provide 'config.json' of the model in the same directory containing model files.")

        n_head = n_embd // 128  # guessed

        return Params(
            n_vocab=n_vocab,
            n_embd=n_embd,
            n_mult=256,
            n_head=n_head,
            n_layer=n_layer,
        )

    @staticmethod
    def loadHFTransformerJson(model: 'LazyModel', config_path: 'Path') -> 'Params':
        config = json.load(open(config_path))

        n_vocab = config["vocab_size"]
        n_embd = config["hidden_size"]
        n_head = config["num_attention_heads"]
        n_layer = config["num_hidden_layers"]
        n_ff = config["intermediate_size"]

        n_mult = find_n_mult(n_ff, n_embd)

        return Params(
            n_vocab=n_vocab,
            n_embd=n_embd,
            n_mult=n_mult,
            n_head=n_head,
            n_layer=n_layer,
        )

    @staticmethod
    def load(model_plus: 'ModelPlus') -> 'Params':
        orig_config_path = model_plus.paths[0].parent / "params.json"
        hf_transformer_config_path = model_plus.paths[0].parent / "config.json"

        if hf_transformer_config_path.exists():
            params = Params.loadHFTransformerJson(model_plus.model, hf_transformer_config_path)
        else:
            params = Params.guessed(model_plus.model)

        print(f'params: n_vocab:{params.n_vocab} n_embd:{params.n_embd} n_mult:{params.n_mult} n_head:{params.n_head} n_layer:{params.n_layer}')
        return params


class SentencePieceVocab:
    def __init__(self, fname_tokenizer: Path, fname_added_tokens: Optional[Path]) -> None:
        self.sentencepiece_tokenizer = SentencePieceProcessor(str(fname_tokenizer))
        added_tokens: Dict[str, int]
        if fname_added_tokens is not None:
            added_tokens = json.load(open(fname_added_tokens))
        else:
            added_tokens = {}
        vocab_size: int = self.sentencepiece_tokenizer.vocab_size()
        expected_ids = list(range(vocab_size, vocab_size + len(added_tokens)))
        actual_ids = sorted(added_tokens.values())
        if expected_ids != actual_ids:
            raise Exception(f"Expected added token IDs to be sequential and start at {vocab_size}; got {actual_ids}")
        items = sorted(added_tokens.items(), key=lambda text_idx: text_idx[1])
        self.added_tokens_list = [text for (text, idx) in items]
        self.vocab_size_base: int = vocab_size
        self.vocab_size: int = self.vocab_size_base + len(self.added_tokens_list)
        self.fname_tokenizer = fname_tokenizer
        self.fname_added_tokens = fname_added_tokens

    def sentencepiece_tokens(self) -> Iterable[Tuple[bytes, float]]:
        tokenizer = self.sentencepiece_tokenizer
        for i in range(tokenizer.vocab_size()):
            text: bytes
            if tokenizer.is_unknown(i):
                text = " \u2047 ".encode("utf-8")
            elif tokenizer.is_control(i):
                text = b""
            elif tokenizer.is_byte(i):
                piece = tokenizer.id_to_piece(i)
                if len(piece) != 6:
                    raise Exception(f"Invalid token: {piece}")
                byte_value = int(piece[3:-1], 16)
                text = struct.pack("B", byte_value)
            else:
                text = tokenizer.id_to_piece(i).replace("\u2581", " ").encode("utf-8")
            score: float = tokenizer.get_score(i)
            yield text, score

    def added_tokens(self) -> Iterable[Tuple[bytes, float]]:
        for text in self.added_tokens_list:
            score = -1000.0
            yield text.encode("utf-8"), score

    def all_tokens(self) -> Iterable[Tuple[bytes, float]]:
        yield from self.sentencepiece_tokens()
        yield from self.added_tokens()

    def __repr__(self) -> str:
        return f"<SentencePieceVocab with {self.vocab_size_base} base tokens and {len(self.added_tokens_list)} added tokens>"


class GGMLVocab:
    def __init__(self, tokens: List[Tuple[bytes, float]]):
        self.tokens = tokens
        self.vocab_size = len(tokens)

    def all_tokens(self) -> Iterable[Tuple[bytes, float]]:
        return self.tokens

    def __repr__(self) -> str:
        return f"<GGMLVocab with {self.vocab_size} tokens>"


Vocab = Union[SentencePieceVocab, GGMLVocab]


def permute(weights: NDArray, n_head: int) -> NDArray:
    return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
            .swapaxes(1, 2)
            .reshape(weights.shape))
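

# Illustration (a sketch, not executed by the script): permute() reorders the
# rows of a Q/K projection for the rotary-embedding layout GGML expects (see
# convert_transformers_to_orig() below). For an 8x4 weight with n_head=2, each
# head's four rows [0, 1, 2, 3] come back as [0, 2, 1, 3], so the overall row
# order becomes [0, 2, 1, 3, 4, 6, 5, 7].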


def dequantize_q4(qvalues_pack32: NDArray, scales: NDArray, addends: Optional[NDArray], g_idx: Optional[NDArray]) -> NDArray:
    # First reinterpret each row from a list of int32s containing 8 values each
    # to a list of uint8s containing 2 values each.
    qvalues_pack8 = qvalues_pack32.view(np.uint8)

    # Then split out the two values per int8 (which requires an actual
    # conversion because numpy doesn't natively support int4s).
    qvalues = np.zeros([qvalues_pack8.shape[0], qvalues_pack8.shape[1] * 2], dtype=np.uint8)
    qvalues[:, 0::2] = qvalues_pack8 & 0xf
    qvalues[:, 1::2] = qvalues_pack8 >> 4

    assert addends is None or addends.shape == scales.shape
    assert qvalues.shape[0] == scales.shape[0]
    assert qvalues.shape[1] % scales.shape[1] == 0
    if g_idx is None:
        repeat_count = qvalues.shape[1] // scales.shape[1]
        scales = scales[:, :, np.newaxis]
        if addends is not None:
            addends = addends[:, :, np.newaxis]
        # Reshape so that the below computation broadcasts over scales and addends:
        qvalues.shape = (qvalues.shape[0], scales.shape[1], int(repeat_count))
    else:
        # In this case the scale and addend is selected for each column by g_idx:
        assert addends is not None
        scales = scales[:, g_idx]
        addends = addends[:, g_idx]
    if addends is None:
        # Q4_0
        qvalues = qvalues.view(np.int8)
        qvalues -= 8
    # And do the actual 'value = scale * qvalue + addend' computation.
    values = scales * qvalues
    if addends is not None:
        values += addends
    if g_idx is None:
        values.shape = (values.shape[0], values.shape[1] * values.shape[2])
    return values
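

# Arithmetic sketch (illustrative only): with Q4_0, a stored nibble q in
# [0, 15] and a group scale s decode as value = s * (q - 8); with Q4_1 the
# nibble stays unsigned and an addend a gives value = s * q + a. For example,
# s = 0.5, a = -4.0, q = 9 decodes under Q4_1 to 0.5 * 9 - 4.0 = 0.5.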


class Tensor(metaclass=ABCMeta):
    data_type: DataType

    @abstractmethod
    def astype(self, data_type: DataType) -> 'Tensor': ...
    @abstractmethod
    def permute(self, n_head: int) -> 'Tensor': ...
    @abstractmethod
    def permute_part(self, n_part: int, n_head: int) -> 'UnquantizedTensor': ...
    @abstractmethod
    def part(self, n_part: int) -> 'UnquantizedTensor': ...
    @abstractmethod
    def to_ggml(self) -> 'GGMLCompatibleTensor': ...


def bf16_to_fp32(bf16_arr: np.ndarray) -> np.ndarray:
    assert bf16_arr.dtype == np.uint16, f"Input array should be of dtype uint16, but got {bf16_arr.dtype}"
    fp32_arr = bf16_arr.astype(np.uint32) << 16
    return fp32_arr.view(np.float32)
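

# Example (illustrative): bfloat16 is the top 16 bits of a float32, so widening
# to uint32 and shifting left by 16 restores the full float32 bit pattern:
#   bf16_to_fp32(np.array([0x3f80], dtype=np.uint16))  # -> array([1.], dtype=float32)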


class UnquantizedTensor(Tensor):
    def __init__(self, ndarray: NDArray) -> None:
        assert isinstance(ndarray, np.ndarray)
        self.ndarray = ndarray
        self.data_type = NUMPY_TYPE_TO_DATA_TYPE[ndarray.dtype]

    def astype(self, data_type: DataType) -> Tensor:
        dtype = DATA_TYPE_TO_NUMPY[data_type]
        if self.data_type == DT_BF16:
            self.ndarray = bf16_to_fp32(self.ndarray)
        return UnquantizedTensor(self.ndarray.astype(dtype))

    def to_ggml(self) -> 'UnquantizedTensor':
        return self

    def permute_part(self, n_part: int, n_head: int) -> 'UnquantizedTensor':
        r = self.ndarray.shape[0] // 3
        return UnquantizedTensor(permute(self.ndarray[r * n_part : r * n_part + r, ...], n_head))

    def part(self, n_part: int) -> 'UnquantizedTensor':
        r = self.ndarray.shape[0] // 3
        return UnquantizedTensor(self.ndarray[r * n_part : r * n_part + r, ...])

    def permute(self, n_head: int) -> 'UnquantizedTensor':
        return UnquantizedTensor(permute(self.ndarray, n_head))


def load_unquantized(lazy_tensor: 'LazyTensor', expected_dtype: Any = None, convert: bool = False) -> NDArray:
    tensor = lazy_tensor.load()
    assert isinstance(tensor, UnquantizedTensor)

    # double-check:
    actual_shape = list(tensor.ndarray.shape)
    assert actual_shape == lazy_tensor.shape, (actual_shape, lazy_tensor.shape)
    if expected_dtype is not None and expected_dtype != tensor.ndarray.dtype:
        if convert:
            tensor.ndarray = tensor.ndarray.astype(expected_dtype)
        else:
            raise ValueError(f'expected this tensor to have dtype {expected_dtype}, got {tensor.ndarray.dtype}')

    return tensor.ndarray


class GGMLQuantizedTensor(Tensor):
    data_type: QuantizedDataType

    def __init__(self, ndarray: NDArray, shape: List[int], data_type: DataType) -> None:
        rows, columns = shape
        assert data_type in (DT_Q4_1, DT_Q4_0)  # for now
        assert isinstance(data_type, QuantizedDataType)  # redundant, but mypy complains without this
        assert columns % data_type.groupsize == 0
        words_in_block = 6 if data_type == DT_Q4_1 else 5
        self.ndarray = ndarray.view(dtype=np.uint32).reshape((rows, columns // data_type.groupsize, words_in_block))
        self.shape = shape[:]
        self.data_type = data_type

    def astype(self, data_type: DataType) -> Tensor:
        if data_type == self.data_type:
            return self
        scales = self.ndarray[:, :, 0].view(np.float32)
        if self.data_type.have_addends:
            addends = self.ndarray[:, :, 1].view(np.float32)
        else:
            addends = None
        qweights = self.ndarray[:, :, -4:].reshape([self.shape[0], self.shape[1] // 8])
        dq = dequantize_q4(qweights, scales, addends, g_idx=None)
        return UnquantizedTensor(dq).astype(data_type)

    def to_ggml(self) -> 'GGMLQuantizedTensor':
        return self

    def permute(self, n_head: int) -> 'GGMLQuantizedTensor':
        return GGMLQuantizedTensor(permute(self.ndarray, n_head), self.shape, self.data_type)


GGMLCompatibleTensor = Union[UnquantizedTensor, GGMLQuantizedTensor]


class DeferredPermutedTensor(Tensor):
    def __init__(self, base: Tensor, n_head: int) -> None:
        self.base = base
        self.n_head = n_head
        self.data_type = self.base.data_type

    def astype(self, data_type: DataType) -> Tensor:
        return self.base.astype(data_type).permute(self.n_head)

    def to_ggml(self) -> GGMLCompatibleTensor:
        return self.base.to_ggml().permute(self.n_head)

    def permute(self, n_head: int) -> Tensor:
        raise Exception("shouldn't permute twice")


class GPTQForLLaMaQuantizedTensor(Tensor):
    def __init__(self, model: 'LazyModel', namebase: str) -> None:
        qweight = load_unquantized(model[f"{namebase}.qweight"], np.int32)
        scales = load_unquantized(model[f"{namebase}.scales"], np.float32, convert=True)

        bias = model.get(f"{namebase}.bias")
        if bias is not None:
            # Q4_1 does not support bias; good thing the bias is always all zeros.
            assert not np.any(load_unquantized(bias))

        if f"{namebase}.zeros" in model:
            zeros = load_unquantized(model[f"{namebase}.zeros"], np.float32)
        else:
            qzeros = load_unquantized(model[f"{namebase}.qzeros"], np.int32)
            assert qzeros.dtype == np.int32
            zeros = dequantize_q4(qzeros, scales, scales, g_idx=None)
            assert zeros.dtype == np.float32

        assert zeros.shape == scales.shape

        # Output is transposed compared to the input, and addends have their sign flipped.
        # Scales and zeros similarly must be transposed but only for newer
        # versions of GPTQ-for-LLaMa; the older versions can be identified by
        # having shape (n_embd, 1).
        qweight = qweight.T
        if scales.shape[1] != 1:
            scales = scales.T
            zeros = zeros.T

        # Output also has signs flipped for the addends.
        self.qweight = qweight
        self.scales = scales
        self.addends = -zeros

        self.g_idx: Optional[NDArray]
        if f"{namebase}.g_idx" in model:
            self.g_idx = load_unquantized(model[f"{namebase}.g_idx"], np.int32)
            assert self.g_idx.shape == (qweight.shape[1] * 8,)
        else:
            self.g_idx = None

        self.shape = [self.qweight.shape[0], self.qweight.shape[1] * 8]
        self.data_type = QuantizedDataType(groupsize=self.groupsize(), have_addends=True,
                                           have_g_idx=(self.g_idx is not None))

    def inspect(self, row: int, col: int) -> None:
        '''For debugging.'''
        qweight = (self.qweight[row, col // 8] >> (4 * (col & 7))) & 0xf
        if self.g_idx is not None:
            group = self.g_idx[col]
        else:
            group = int(col // self.groupsize())
        scale = self.scales[row, group]
        addend = self.addends[row, group]
        with np.printoptions(precision=None, suppress=True):
            print(f'scale:{scale} addend:{addend} qweight:{qweight}')
            print('possible values:', np.arange(16) * scale + addend)
            print('actual value:', qweight * scale + addend)

    def astype(self, data_type: DataType) -> Tensor:
        if isinstance(data_type, QuantizedDataType):
            assert self.g_idx is None and data_type.have_addends is True and data_type.have_g_idx is False
            return self.regroup(data_type.groupsize)

        dequantized = dequantize_q4(np.ascontiguousarray(self.qweight), self.scales, self.addends, self.g_idx)
        return UnquantizedTensor(dequantized).astype(data_type)

    def groupsize(self) -> int:
        assert self.addends.shape == self.scales.shape
        assert self.shape[1] % self.scales.shape[1] == 0
        return self.shape[1] // self.scales.shape[1]

    def regroup(self, new_groupsize: int = 32) -> 'GPTQForLLaMaQuantizedTensor':
        # Old versions of GPTQ-for-LLaMa shared scales and addends between all the
        # columns in a row. Newer versions share them between every set of N
        # columns in a row, where N is the `groupsize` parameter, usually 128. The
        # output format shares them between every set of 32 columns. To handle
        # this, duplicate scales and addends for every smaller group.
        # (In the above, 'row' and 'column' are in the sense of the output.)
        assert self.g_idx is None
        old_groupsize = self.groupsize()
        assert old_groupsize >= new_groupsize and old_groupsize % new_groupsize == 0, old_groupsize
        ret = copy.copy(self)
        ret.addends = self.addends.repeat(old_groupsize // new_groupsize, axis=1)
        ret.scales = self.scales.repeat(old_groupsize // new_groupsize, axis=1)
        ret.data_type = QuantizedDataType(groupsize=new_groupsize, have_addends=True, have_g_idx=False)
        return ret

    def permute(self, n_head: int) -> Tensor:
        return DeferredPermutedTensor(self, n_head)

    def to_ggml(self) -> GGMLQuantizedTensor:
        # The output format looks like this:
        # For each row:
        #   For each group of 32 columns:
        #     - scale (float32, 4 bytes)
        #     - addend (float32, 4 bytes)
        #     - weights (int4 * 32, 16 bytes)
        # (Scale first, then addend, matching the concatenation order below and
        # the field order read back in GGMLQuantizedTensor.astype().)
        if self.groupsize() != 32:
            raise Exception("should have been regrouped before converting to ggml")

        # Since the output format is mixed between integers and floats, we have
        # to hackily view the floats as int32s just so numpy will let us
        # concatenate them.
        addends_view = self.addends.view(dtype=np.int32)[:, :, np.newaxis]
        scales_view = self.scales.view(dtype=np.int32)[:, :, np.newaxis]

        # Split into groups of 4 columns (i.e. 32 columns of quantized data):
        grouped = self.qweight.reshape([self.qweight.shape[0], self.qweight.shape[1] // 4, 4])

        # And concatenate:
        grouped = np.concatenate([scales_view, addends_view, grouped], axis=2, casting='no')

        return GGMLQuantizedTensor(grouped, self.shape, DT_Q4_1)
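

# Size check (illustrative): each Q4_1 group of 32 columns is therefore
# 4 + 4 + 16 = 24 bytes, matching the `24 * (shape[1] // 32) * shape[0]` byte
# count that lazy_load_ggml_file() uses when reading Q4_1 tensors back in.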


@dataclass
class LazyTensor:
    _load: Callable[[], Tensor]
    shape: List[int]
    data_type: DataType
    description: str

    def load(self) -> Tensor:
        ret = self._load()
        assert ret.data_type == self.data_type, (self.data_type, ret.data_type, self.description)
        return ret

    def astype(self, data_type: DataType) -> 'LazyTensor':
        self.validate_conversion_to(data_type)

        def load() -> Tensor:
            return self.load().astype(data_type)
        return LazyTensor(load, self.shape, data_type, f'convert({data_type}) {self.description}')

    def validate_conversion_to(self, data_type: DataType) -> None:
        if data_type == self.data_type:
            return
        if isinstance(data_type, QuantizedDataType):
            if not isinstance(self.data_type, QuantizedDataType):
                raise Exception(f"Can't turn an unquantized tensor into a quantized type ({data_type})")
            if self.data_type.have_g_idx:
                sys.stderr.write(
                    "Error: Input uses the newer GPTQ-for-LLaMa format (using g_idx), "
                    "which is not yet natively supported by GGML. "
                    "For now you can still convert this model by passing `--outtype f16` to dequantize, "
                    "but that will result in a much larger output file for no quality benefit.\n")
                sys.exit(1)
            assert not data_type.have_g_idx and self.data_type.have_addends and data_type.have_addends


LazyModel = Dict[str, LazyTensor]


@dataclass
class ModelPlus:
    model: LazyModel
    paths: List[Path]  # Where this was read from.
    format: Literal['ggml', 'torch', 'safetensors']
    vocab: Optional[Vocab]  # For GGML models (which have vocab built in), the vocab.


def merge_sharded(models: List[LazyModel]) -> LazyModel:
    # Original LLaMA models have each file contain one part of each tensor.
    # Use a dict instead of a set to preserve order.
    names = {name: None for model in models for name in model}

    def convert(name: str) -> LazyTensor:
        lazy_tensors: List[LazyTensor] = [model[name] for model in models]
        if len(lazy_tensors) == 1:
            # only one file; don't go through this procedure since there might
            # be quantized tensors
            return lazy_tensors[0]
        if len(lazy_tensors[0].shape) == 1:
            # the tensor is just duplicated in every file
            return lazy_tensors[0]
        if name.startswith('tok_embeddings.') or \
           name.endswith('.attention.wo.weight') or \
           name.endswith('.feed_forward.w2.weight'):
            # split by columns
            axis = 1
        else:
            # split by rows
            axis = 0
        concatenated_shape = list(lazy_tensors[0].shape)
        concatenated_shape[axis] = sum(tensor.shape[axis] for tensor in lazy_tensors)

        def load() -> UnquantizedTensor:
            ndarrays = [load_unquantized(tensor) for tensor in lazy_tensors]
            concatenated: NDArray = np.concatenate(ndarrays, axis=axis)
            return UnquantizedTensor(concatenated)
        description = 'concatenated[[' + '] | ['.join(lt.description for lt in lazy_tensors) + ']]'
        return LazyTensor(load, concatenated_shape, lazy_tensors[0].data_type, description)
    return {name: convert(name) for name in names}
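

# Example (illustrative; shard shapes assumed for LLaMA-13B, which ships as two
# consolidated.0{0,1}.pth files): layers.0.attention.wq.weight is split by rows,
# so two (2560, 5120) shards merge into one (5120, 5120) tensor, while
# tok_embeddings.weight is split by columns and concatenates along axis 1.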


def merge_multifile_models(models_plus: List[ModelPlus]) -> ModelPlus:
    formats = set(mp.format for mp in models_plus)
    assert len(formats) == 1, "different formats?"
    format = formats.pop()
    paths = [path for mp in models_plus for path in mp.paths]
    # Use the first non-None vocab, if any.
    try:
        vocab = next(mp.vocab for mp in models_plus if mp.vocab is not None)
    except StopIteration:
        vocab = None

    if any("model.embed_tokens.weight" in mp.model for mp in models_plus):
        # Transformers models put different tensors in different files, but
        # don't split individual tensors between files.
        model: LazyModel = {}
        for mp in models_plus:
            model.update(mp.model)
    else:
        model = merge_sharded([mp.model for mp in models_plus])

    return ModelPlus(model, paths, format, vocab)


def permute_lazy(lazy_tensor: LazyTensor, n_head: int) -> LazyTensor:
    def load() -> Tensor:
        return lazy_tensor.load().permute(n_head)
    return LazyTensor(load, lazy_tensor.shape, lazy_tensor.data_type, f'permute({n_head}) ' + lazy_tensor.description)


def permute_part_lazy(lazy_tensor: LazyTensor, n_part: int, n_head: int) -> LazyTensor:
    def load() -> Tensor:
        return lazy_tensor.load().permute_part(n_part, n_head)
    s = lazy_tensor.shape.copy()
    s[0] = s[0] // 3
    return LazyTensor(load, s, lazy_tensor.data_type, f'permute({n_head}) ' + lazy_tensor.description)


def part_lazy(lazy_tensor: LazyTensor, n_part: int) -> LazyTensor:
    def load() -> Tensor:
        return lazy_tensor.load().part(n_part)
    s = lazy_tensor.shape.copy()
    s[0] = s[0] // 3
    return LazyTensor(load, s, lazy_tensor.data_type, 'part ' + lazy_tensor.description)


def convert_transformers_to_orig(model: LazyModel, params: Params) -> LazyModel:
    out: LazyModel = {}
    out["tok_embeddings.weight"] = model["model.embed_tokens.weight"]
    out["norm.weight"] = model["model.norm.weight"]
    out["output.weight"] = model["lm_head.weight"]

    for i in itertools.count():
        if f"model.layers.{i}.self_attn.q_proj.weight" in model:
            out[f"layers.{i}.attention.wq.weight"] = permute_lazy(model[f"model.layers.{i}.self_attn.q_proj.weight"], params.n_head)
            out[f"layers.{i}.attention.wk.weight"] = permute_lazy(model[f"model.layers.{i}.self_attn.k_proj.weight"], params.n_head)
            out[f"layers.{i}.attention.wv.weight"] = model[f"model.layers.{i}.self_attn.v_proj.weight"]
        elif f"model.layers.{i}.self_attn.W_pack.weight" in model:
            out[f"layers.{i}.attention.wq.weight"] = permute_part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 0, params.n_head)
            out[f"layers.{i}.attention.wk.weight"] = permute_part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 1, params.n_head)
            out[f"layers.{i}.attention.wv.weight"] = part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 2)
        else:
            break

        out[f"layers.{i}.attention.wo.weight"] = model[f"model.layers.{i}.self_attn.o_proj.weight"]

        out[f"layers.{i}.feed_forward.w1.weight"] = model[f"model.layers.{i}.mlp.gate_proj.weight"]
        out[f"layers.{i}.feed_forward.w2.weight"] = model[f"model.layers.{i}.mlp.down_proj.weight"]
        out[f"layers.{i}.feed_forward.w3.weight"] = model[f"model.layers.{i}.mlp.up_proj.weight"]

        out[f"layers.{i}.attention_norm.weight"] = model[f"model.layers.{i}.input_layernorm.weight"]
        out[f"layers.{i}.ffn_norm.weight"] = model[f"model.layers.{i}.post_attention_layernorm.weight"]
    return out


def handle_quantization(model: LazyModel) -> LazyModel:
    '''Convert a model with entries for 'foo.qweight', 'foo.scales', etc.
    (which resolve to UnquantizedTensors with the raw data) to one with entries
    for 'foo.weight' (which resolve to QuantizedTensors).
    '''
    def convert(name: str) -> Tuple[str, LazyTensor]:
        if name.endswith(".qweight"):
            namebase = name.rsplit('.', 1)[0]
            orig_name = namebase + ".weight"

            lazy_tensor = model[name]
            assert len(lazy_tensor.shape) == 2
            real_shape = [lazy_tensor.shape[1], lazy_tensor.shape[0] * 8]

            # Calculate type. This replicates the logic in
            # GPTQForLLaMaQuantizedTensor (which is executed when the model is
            # actually loaded).
            lazy_scales = model[f"{namebase}.scales"]
            scales_width = 1 if lazy_scales.shape[1] == 1 else lazy_scales.shape[0]
            assert real_shape[1] % scales_width == 0
            groupsize = real_shape[1] // scales_width
            have_g_idx = f"{namebase}.g_idx" in model
            data_type = QuantizedDataType(groupsize=groupsize, have_addends=True, have_g_idx=have_g_idx)

            def load() -> Tensor:
                return GPTQForLLaMaQuantizedTensor(model, namebase)
            return (orig_name, LazyTensor(load, real_shape, data_type, '[quantized]'))
        else:
            return (name, model[name])
    return dict(convert(name) for name in model)
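

# Example (illustrative): handle_quantization() maps a GPTQ checkpoint entry
# such as 'model.layers.0.self_attn.q_proj.qweight' (together with its
# '.scales'/'.qzeros' companions) to a single
# 'model.layers.0.self_attn.q_proj.weight' LazyTensor with a quantized type.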


# Functionality that simulates `torch.load` but where individual tensors are
# only loaded into memory on demand, not all at once.
# PyTorch can't do this natively as of time of writing:
# - https://github.com/pytorch/pytorch/issues/64327
# This allows us to de-shard without multiplying RAM usage, and also
# conveniently drops the PyTorch dependency (though we still need numpy).


@dataclass
class LazyStorageKind:
    data_type: DataType


@dataclass
class LazyStorage:
    load: Callable[[int, int], NDArray]
    kind: LazyStorageKind
    description: str


class LazyUnpickler(pickle.Unpickler):
    def __init__(self, fp: IO[bytes], data_base_path: str, zip_file: zipfile.ZipFile):
        super().__init__(fp)
        self.data_base_path = data_base_path
        self.zip_file = zip_file

    def persistent_load(self, pid: Any) -> Any:
        assert pid[0] == 'storage'
        assert isinstance(pid[1], LazyStorageKind)
        data_type = pid[1].data_type
        filename_stem = pid[2]
        filename = self.data_base_path + '/' + filename_stem
        info = self.zip_file.getinfo(filename)

        def load(offset: int, elm_count: int) -> NDArray:
            dtype = DATA_TYPE_TO_NUMPY.get(data_type)
            if dtype is None:
                raise Exception("tensor stored in unsupported format")
            fp = self.zip_file.open(info)
            fp.seek(offset * dtype.itemsize)
            size = elm_count * dtype.itemsize
            data = fp.read(size)
            assert len(data) == size
            return np.frombuffer(data, dtype)
        description = f'storage data_type={data_type} path-in-zip={filename} path={self.zip_file.filename}'
        return LazyStorage(load=load, kind=pid[1], description=description)

    # @staticmethod
    def lazy_rebuild_tensor_v2(storage: Any, storage_offset: Any, size: Any, stride: Any,  # pyright: ignore[reportSelfClsParameterName]
                               requires_grad: Any, backward_hooks: Any, metadata: Any = None) -> LazyTensor:
        assert isinstance(storage, LazyStorage)

        def load() -> UnquantizedTensor:
            elm_count = stride[0] * size[0]
            return UnquantizedTensor(storage.load(storage_offset, elm_count).reshape(size))
        description = f'pickled storage_offset={storage_offset} in {storage.description}'
        return LazyTensor(load, list(size), storage.kind.data_type, description)

    # @staticmethod
    def rebuild_from_type_v2(func, new_type, args, state):
        return func(*args)

    CLASSES: Dict[Any, Any] = {
        ('torch._tensor', '_rebuild_from_type_v2'): rebuild_from_type_v2,
        ('torch._utils', '_rebuild_tensor_v2'): lazy_rebuild_tensor_v2,
        ('torch', 'BFloat16Storage'): LazyStorageKind(DT_BF16),
        ('torch', 'HalfStorage'): LazyStorageKind(DT_F16),
        ('torch', 'FloatStorage'): LazyStorageKind(DT_F32),
        ('torch', 'IntStorage'): LazyStorageKind(DT_I32),
        ('torch', 'Tensor'): LazyTensor,
    }

    def find_class(self, module: str, name: str) -> Any:
        if not module.startswith('torch'):
            return super().find_class(module, name)
        return self.CLASSES[(module, name)]


def lazy_load_torch_file(outer_fp: IO[bytes], path: Path) -> ModelPlus:
    zf = zipfile.ZipFile(outer_fp)
    pickle_paths = [name for name in zf.namelist() if name.endswith('.pkl')]
    assert len(pickle_paths) == 1, pickle_paths
    pickle_fp = zf.open(pickle_paths[0], 'r')
    unpickler = LazyUnpickler(pickle_fp,
                              data_base_path=pickle_paths[0][:-4],
                              zip_file=zf)
    model = unpickler.load()
    as_dict = dict(model.items())
    return ModelPlus(model=as_dict, paths=[path], format='torch', vocab=None)


SAFETENSORS_DATA_TYPES: Dict[str, DataType] = {
    'BF16': DT_BF16,
    'F16': DT_F16,
    'F32': DT_F32,
    'I32': DT_I32,
}


def lazy_load_safetensors_file(fp: IO[bytes], path: Path) -> ModelPlus:
    header_size, = struct.unpack('<Q', fp.read(8))
    header: Dict[str, Dict[str, Any]] = json.loads(fp.read(header_size))
    # Use mmap for the actual data to avoid race conditions with the file offset.
    mapped = memoryview(mmap.mmap(fp.fileno(), 0, access=mmap.ACCESS_READ))
    byte_buf = mapped[8 + header_size:]

    def convert(info: Dict[str, Any]) -> LazyTensor:
        data_type = SAFETENSORS_DATA_TYPES[info['dtype']]
        numpy_dtype = DATA_TYPE_TO_NUMPY[data_type]
        shape: List[int] = info['shape']
        begin, end = info['data_offsets']
        assert 0 <= begin <= end <= len(byte_buf)
        assert end - begin == math.prod(shape) * numpy_dtype.itemsize
        buf = byte_buf[begin:end]

        def load() -> UnquantizedTensor:
            return UnquantizedTensor(np.frombuffer(buf, dtype=numpy_dtype).reshape(shape))
        description = f'safetensors begin={begin} end={end} type={data_type} path={path}'
        return LazyTensor(load, shape, data_type, description)
    model = {name: convert(info) for (name, info) in header.items() if name != '__metadata__'}
    return ModelPlus(model=model, paths=[path], format='safetensors', vocab=None)


def must_read(fp: IO[bytes], length: int) -> bytes:
    ret = fp.read(length)
    if len(ret) < length:
        raise Exception("unexpectedly reached end of file")
    return ret


def lazy_load_ggml_file(fp: io.BufferedReader, path: Path) -> ModelPlus:
    magic = must_read(fp, 4)[::-1]
    if magic in (b'ggmf', b'ggjt'):
        version, = struct.unpack("i", must_read(fp, 4))
        assert version == 1
    else:
        assert magic == b'ggml'
        version = None
    n_vocab, n_embd, n_mult, n_head, n_layer, rot, file_type = struct.unpack('<7i', must_read(fp, 28))

    tokens: List[Tuple[bytes, float]] = []
    for i in range(n_vocab):
        if i == 32000:
            # HACK: GPT4All messed with the format without changing the magic
            # number. Specifically, they changed the vocab section to contain
            # `n_vocab - 1` tokens instead of `n_vocab` (i.e. omitting the
            # extra pad token). Try to detect if we're reading a file like
            # this.
            orig_pos = fp.tell()
            fp.seek(20, io.SEEK_CUR)
            is_gpt4all = fp.read(21) == b'tok_embeddings.weight'
            fp.seek(orig_pos)
            if is_gpt4all:
                break

        length, = struct.unpack("i", must_read(fp, 4))
        text = must_read(fp, length)
        if magic != b'ggml':
            score, = struct.unpack("f", must_read(fp, 4))
            tokens.append((text, score))
    vocab = GGMLVocab(tokens) if magic != b'ggml' else None

    model: LazyModel = {}
    # Use mmap for the actual data to avoid race conditions with the file offset.
    off = fp.raw.tell()
    mapped = memoryview(mmap.mmap(fp.fileno(), 0, access=mmap.ACCESS_READ))
    fp.raw.seek(off)  # needed on Windows

    def read_tensor() -> None:  # this is a function so that variables captured in `load` don't change
        shape_len, name_len, ftype = struct.unpack("iii", must_read(fp, 12))
        assert 0 <= shape_len <= 3
        shape: List[int] = list(struct.unpack(f"{shape_len}i", must_read(fp, 4 * shape_len)))
        shape = shape[::-1]
        name = must_read(fp, name_len).decode('utf-8')
        data_type = FTYPE_TO_DATA_TYPE[ftype]

        if magic == b'ggjt':
            fp.seek((fp.tell() + 31) & -32)

        if data_type == DT_Q4_1:
            # See GPTQForLLaMaQuantizedTensor.to_ggml() for the origin of the 24 bytes per group.
            size = 24 * (shape[1] // 32) * shape[0]
        elif data_type == DT_Q4_0:
            size = 20 * (shape[1] // 32) * shape[0]
        else:
            numpy_dtype = DATA_TYPE_TO_NUMPY[data_type]
            elm_count = math.prod(shape)
            size = elm_count * numpy_dtype.itemsize
        offset = fp.tell()
        buf = mapped[offset:offset + size]
        fp.seek(size, io.SEEK_CUR)

        def load() -> Tensor:
            if isinstance(data_type, QuantizedDataType):
                ndarray = np.frombuffer(buf, dtype=np.uint32)
                return GGMLQuantizedTensor(ndarray, shape, data_type)
            else:
                return UnquantizedTensor(np.frombuffer(buf, dtype=numpy_dtype).reshape(shape))
        description = f'ggml offset={offset} type={data_type} path={path}'
        model[name] = LazyTensor(load, shape, data_type, description)

    while fp.read(1) != b'':
        fp.seek(-1, io.SEEK_CUR)
        read_tensor()

    return ModelPlus(model=model, paths=[path], format='ggml', vocab=vocab)


@functools.lru_cache(maxsize=None)
def lazy_load_file(path: Path) -> ModelPlus:
    fp = open(path, 'rb')
    first8 = fp.read(8)
    fp.seek(0)
    if first8[:2] == b'PK':
        # A zip file, i.e. PyTorch format
        return lazy_load_torch_file(fp, path)
    elif first8[2:4] == b'gg':
        # GGML format
        return lazy_load_ggml_file(fp, path)
    elif struct.unpack('<Q', first8)[0] < 16 * 1024 * 1024:
        # Probably safetensors
        return lazy_load_safetensors_file(fp, path)
    else:
        raise ValueError(f"unknown format: {path}")
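

# Note on the detection above (explanatory): GGML magics are written
# little-endian, so e.g. b"ggjt"[::-1] == b'tjgg' on disk, and bytes 2:4 are
# b'gg' for all of the 'ggml'/'ggmf'/'ggjt' variants. A safetensors file
# instead starts with a little-endian uint64 JSON-header size, which in
# practice is far smaller than 16 MiB.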


In = TypeVar('In')
Out = TypeVar('Out')


def bounded_parallel_map(func: Callable[[In], Out], iterable: Iterable[In], concurrency: int) -> Iterable[Out]:
    '''Parallel map, but with backpressure. If the caller doesn't call `next`
    fast enough, this will stop calling `func` at some point rather than
    letting results pile up in memory. Specifically, there is a max of one
    output value buffered per thread.'''
    with concurrent.futures.ThreadPoolExecutor() as executor:
        futures: List[concurrent.futures.Future[Out]] = []
        items_rev = list(iterable)[::-1]
        for i in range(min(concurrency, len(items_rev))):
            futures.append(executor.submit(func, items_rev.pop()))
        while futures:
            result = futures.pop(0).result()
            if items_rev:
                futures.append(executor.submit(func, items_rev.pop()))
            yield result
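

# Usage sketch (illustrative, not executed by the script): results arrive in
# input order and at most `concurrency` calls run at once:
#   for x in bounded_parallel_map(lambda n: n * n, range(100), concurrency=8):
#       print(x)  # 0, 1, 4, 9, ...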


def check_vocab_size(params: Params, vocab: Vocab) -> None:
    if params.n_vocab != vocab.vocab_size:
        # GGMLVocab comes from the same file as the model so shouldn't mismatch:
        assert isinstance(vocab, SentencePieceVocab)
        if params.n_vocab == vocab.vocab_size_base:
            print("Ignoring added_tokens.json since model matches vocab size without it.")
            vocab.added_tokens_list = []
            vocab.vocab_size = vocab.vocab_size_base
            return
        msg = f"Vocab size mismatch (model has {params.n_vocab}, but {vocab.fname_tokenizer}"
        if vocab.fname_added_tokens is not None:
            msg += f" combined with {vocab.fname_added_tokens}"
        msg += f" has {vocab.vocab_size})."
        if vocab.vocab_size < params.n_vocab < vocab.vocab_size + 20 and vocab.fname_added_tokens is None:
            msg += f" Most likely you are missing added_tokens.json (should be in {vocab.fname_tokenizer.parent})."
        raise Exception(msg)


class OutputFile:
    def __init__(self, fname_out: Path) -> None:
        self.fout = open(fname_out, "wb")

    def write_file_header(self, params: Params, file_type: GGMLFileType) -> None:
        self.fout.write(b"ggjt"[::-1])  # magic
        values = [
            1,  # file version
            params.n_vocab,
            params.n_embd,
            params.n_mult,
            params.n_head,
            params.n_layer,
            params.n_embd // params.n_head,  # rot (obsolete)
            file_type.value,
        ]
        self.fout.write(struct.pack("i" * len(values), *values))

    def write_tensor_header(self, name: str, shape: Sequence[int], data_type: DataType) -> None:
        sname = name.encode('utf-8')
        self.fout.write(struct.pack("iii", len(shape), len(sname), DATA_TYPE_TO_FTYPE[data_type]))
        self.fout.write(struct.pack("i" * len(shape), *shape[::-1]))
        self.fout.write(sname)
        self.fout.seek((self.fout.tell() + 31) & -32)
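
    # Note on the seek above (explanatory): `(tell + 31) & -32` rounds the
    # current offset up to the next multiple of 32 so tensor data is 32-byte
    # aligned, e.g. 100 -> 128 while 128 stays 128; the same trick appears in
    # lazy_load_ggml_file() when reading 'ggjt' files.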

    def write_vocab(self, vocab: Vocab) -> None:
        for text, score in vocab.all_tokens():
            self.fout.write(struct.pack("i", len(text)))
            self.fout.write(text)
            self.fout.write(struct.pack("f", score))

    @staticmethod
    def write_vocab_only(fname_out: Path, vocab: Vocab) -> None:
        params = Params(n_vocab=vocab.vocab_size, n_embd=0, n_mult=0,
                        n_head=1, n_layer=0)
        of = OutputFile(fname_out)
        of.write_file_header(params, file_type=GGMLFileType.AllF32)
        of.write_vocab(vocab)
        of.fout.close()

    @staticmethod
    def write_all(fname_out: Path, params: Params, file_type: GGMLFileType, model: LazyModel, vocab: Vocab) -> None:
        check_vocab_size(params, vocab)
        of = OutputFile(fname_out)
        of.write_file_header(params, file_type)
        print("Writing vocab...")
        of.write_vocab(vocab)

        def do_item(item: Tuple[str, LazyTensor]) -> NDArray:
            name, lazy_tensor = item
            return lazy_tensor.load().to_ggml().ndarray

        ndarrays = bounded_parallel_map(do_item, model.items(), concurrency=8)
        for i, ((name, lazy_tensor), ndarray) in enumerate(zip(model.items(), ndarrays)):
            size = ' x '.join(f"{dim:6d}" for dim in lazy_tensor.shape)
            padi = len(str(len(model)))
            print(f"[{i + 1:{padi}d}/{len(model)}] Writing tensor {name:38s} | size {size:16} | type {lazy_tensor.data_type}")
            of.write_tensor_header(name, lazy_tensor.shape, lazy_tensor.data_type)
            ndarray.tofile(of.fout)
        of.fout.close()


def pick_output_type(model: LazyModel, output_type_str: Optional[str]) -> GGMLFileType:
    wq_type = model["layers.0.attention.wq.weight"].data_type
    if output_type_str == "f32" or (output_type_str is None and wq_type in (DT_F32, DT_BF16)):
        return GGMLFileType.AllF32
    if output_type_str == "f16" or (output_type_str is None and wq_type == DT_F16):
        return GGMLFileType.MostlyF16
    if output_type_str == "q4_1" or (output_type_str is None and isinstance(wq_type, QuantizedDataType) and
                                     wq_type.have_addends):
        if isinstance(model["output.weight"].data_type, QuantizedDataType):
            return GGMLFileType.MostlyQ4_1
        else:
            return GGMLFileType.PerLayerIsQ4_1
    if output_type_str == "q4_0" or (output_type_str is None and isinstance(wq_type, QuantizedDataType)):
        return GGMLFileType.MostlyQ4_0
    name_to_type = {name: lazy_tensor.data_type for (name, lazy_tensor) in model.items()}
    raise Exception(f"Unexpected combination of types: {name_to_type}")


def do_necessary_conversions(model: LazyModel, params: Params) -> LazyModel:
    model = handle_quantization(model)

    if "lm_head.weight" in model:
        model = convert_transformers_to_orig(model, params)
    model = filter_and_sort_tensors(model)

    return model


def convert_to_output_type(model: LazyModel, output_type: GGMLFileType) -> LazyModel:
    return {name: tensor.astype(output_type.type_for_tensor(name, tensor))
            for (name, tensor) in model.items()}


def nth_multifile_path(path: Path, n: int) -> Optional[Path]:
    '''Given any path belonging to a multi-file model (e.g. foo.bin.1), return
    the nth path in the model.
    '''
    # Support the following patterns:
    patterns: List[Tuple[str, str]] = [
        # - x.00.pth, x.01.pth, etc.
        (r'\.[0-9]{2}\.pth$', f'.{n:02}.pth'),
        # - x-00001-of-00002.bin, x-00002-of-00002.bin, etc.
        (r'-[0-9]{5}-of-(.*)$', fr'-{n:05}-of-\1'),
        # x.bin, x.bin.1, etc.
        (r'(\.[0-9]+)?$', r'\1' if n == 0 else fr'\1.{n}')
    ]
    for regex, replacement in patterns:
        if re.search(regex, path.name):
            new_path = path.with_name(re.sub(regex, replacement, path.name))
            if new_path.exists():
                return new_path
    return None
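

# Example (illustrative): nth_multifile_path(Path('consolidated.00.pth'), 1)
# returns Path('consolidated.01.pth') if that file exists, and
# nth_multifile_path(Path('foo.bin'), 1) looks for 'foo.bin.1'.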


def find_multifile_paths(path: Path) -> List[Path]:
    '''Given any path belonging to a multi-file model (e.g. foo.bin.1), return
    the whole list of paths in the model.
    '''
    ret: List[Path] = []
    for i in itertools.count():
        nth_path = nth_multifile_path(path, i)
        if nth_path is None:
            break
        ret.append(nth_path)
    if not ret:
        # No matches. This should only happen if the file was named, e.g.,
        # foo.0, and there was no file named foo. Oh well, try to process it
        # as a single file.
        return [path]
    return ret


def load_some_model(path: Path) -> ModelPlus:
    '''Load a model of any supported format.'''
    # Be extra-friendly and accept either a file or a directory:
    if path.is_dir():
        # Check if it's a set of safetensors files first
        files = list(path.glob("model-00001-of-*.safetensors"))
        if not files:
            # Try the PyTorch patterns too, with lower priority
            globs = ["consolidated.00.pth", "pytorch_model-00001-of-*.bin", "*.pt", "pytorch_model.bin"]
            files = [file for glob in globs for file in path.glob(glob)]
        if not files:
            # Try GGML too, but with lower priority, since if both a non-GGML
            # model and a GGML model exist in the same directory, we assume the
            # latter was converted from the former.
            files = list(path.glob("ggml-model*.bin*"))
        if not files:
            raise Exception(f"Can't find model in directory {path}")
        if len(files) > 1:
            raise Exception(f"Found multiple models in {path}, not sure which to pick: {files}")
        path = files[0]

    paths = find_multifile_paths(path)
    models_plus: List[ModelPlus] = []
    for path in paths:
        print(f"Loading model file {path}")
        models_plus.append(lazy_load_file(path))

    model_plus = merge_multifile_models(models_plus)
    return model_plus


def filter_and_sort_tensors(model: LazyModel) -> LazyModel:
    return {name: model[name] for name in TENSORS_LIST if name in model}


def load_vocab(path: Path) -> SentencePieceVocab:
    # Be extra-friendly and accept either a file or a directory. Also, if it's
    # a directory, it might be the model directory, and tokenizer.model might
    # be in the parent of that.
    if path.is_dir():
        path2 = path / "tokenizer.model"
        # Use `.parent` instead of /.. to handle the symlink case better.
        path3 = path.parent / "tokenizer.model"
        if path2.exists():
            path = path2
        elif path3.exists():
            path = path3
        else:
            raise FileNotFoundError(
                f"Could not find tokenizer.model in {path} or its parent; "
                "if it's in another directory, pass the directory as --vocab-dir")
    added_tokens_path = path.parent / "added_tokens.json"
    print(f"Loading vocab file {path}")
    return SentencePieceVocab(path, added_tokens_path if added_tokens_path.exists() else None)


def default_outfile(model_paths: List[Path], file_type: GGMLFileType) -> Path:
    namestr = {
        GGMLFileType.AllF32: "f32",
        GGMLFileType.MostlyF16: "f16",
        GGMLFileType.MostlyQ4_0: "q4_0",
        GGMLFileType.MostlyQ4_1: "q4_1",
        GGMLFileType.PerLayerIsQ4_1: "q4_1",
    }[file_type]
    ret = model_paths[0].parent / f"ggml-model-{namestr}.bin"
    if ret in model_paths:
        sys.stderr.write(
            f"Error: Default output path ({ret}) would overwrite the input. "
            "Please explicitly specify a path using --outfile.\n")
        sys.exit(1)
    return ret


def do_dump_model(model_plus: ModelPlus) -> None:
    print(f"model_plus.paths = {model_plus.paths!r}")
    print(f"model_plus.format = {model_plus.format!r}")
    print(f"model_plus.vocab = {model_plus.vocab!r}")
    for name, lazy_tensor in model_plus.model.items():
        print(f"{name}: shape={lazy_tensor.shape} type={lazy_tensor.data_type}; {lazy_tensor.description}")


def main(args_in: Optional[List[str]] = None) -> None:
    parser = argparse.ArgumentParser(description="Convert a LLaMa model to a GGML compatible file")
    parser.add_argument("--dump", action="store_true", help="don't convert, just show what's in the model")
    parser.add_argument("--dump-single", action="store_true", help="don't convert, just show what's in a single model file")
    parser.add_argument("--vocab-only", action="store_true", help="extract only the vocab")
    parser.add_argument("--outtype", choices=["f32", "f16", "q4_1", "q4_0"], help="output format (default: based on input)")
    parser.add_argument("--vocab-dir", type=Path, help="directory containing tokenizer.model, if separate from model file")
    parser.add_argument("--outfile", type=Path, help="path to write to; default: based on input")
    parser.add_argument("model", type=Path,
                        help="directory containing model file, or model file itself (*.pth, *.pt, *.bin)")
    args = parser.parse_args(args_in)

    vocab: Vocab
    if args.dump_single:
        model_plus = lazy_load_file(args.model)
        do_dump_model(model_plus)
    elif args.vocab_only:
        vocab = load_vocab(args.vocab_dir or args.model)
        assert args.outfile, "need --outfile if using --vocab-only"
        outfile = args.outfile
        OutputFile.write_vocab_only(outfile, vocab)
        print(f"Wrote {outfile}")
    else:
        model_plus = load_some_model(args.model)
        if args.dump:
            do_dump_model(model_plus)
            return
        if model_plus.vocab is not None and args.vocab_dir is None:
            vocab = model_plus.vocab
        else:
            vocab_dir = args.vocab_dir if args.vocab_dir else model_plus.paths[0].parent
            vocab = load_vocab(vocab_dir)
        params = Params.load(model_plus)
        model = model_plus.model
        model = do_necessary_conversions(model, params)
        output_type = pick_output_type(model, args.outtype)
        model = convert_to_output_type(model, output_type)
        outfile = args.outfile or default_outfile(model_plus.paths, output_type)
        OutputFile.write_all(outfile, params, output_type, model, vocab)
        print(f"Wrote {outfile}")


if __name__ == '__main__':
    main()