convert.py

#!/usr/bin/env python3
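# Convert a LLaMA-style model (PyTorch .pth/.bin checkpoints or safetensors)
# into a single GGUF file. Typical invocation (paths are illustrative):
#
#   python3 convert.py models/7B/ --outtype f16 --outfile ggml-model-f16.gguf
#
# Run with --help for the full list of options (see main() below).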
import gguf
import argparse
import concurrent.futures
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
import copy
import enum
import faulthandler
import functools
import io
import itertools
import json
import math
import mmap
import pickle
import re
import signal
import struct
import sys
import time
import zipfile
import numpy as np

from abc import ABCMeta, abstractmethod
from dataclasses import dataclass
from pathlib import Path
from typing import (IO, TYPE_CHECKING, Any, Callable, Dict, Generator, Iterable, List, Literal, Optional, Sequence, Set, Tuple, TypeVar, Union)
from sentencepiece import SentencePieceProcessor  # type: ignore

if TYPE_CHECKING:
    from typing_extensions import TypeAlias

if hasattr(faulthandler, 'register') and hasattr(signal, 'SIGUSR1'):
    faulthandler.register(signal.SIGUSR1)

NDArray: 'TypeAlias' = 'np.ndarray[Any, Any]'

ARCH  = gguf.MODEL_ARCH.LLAMA
NAMES = gguf.MODEL_TENSOR_NAMES[ARCH]

DEFAULT_CONCURRENCY = 8

#
# data types
#

@dataclass(frozen=True)
class DataType:
    name: str
    dtype: 'np.dtype[Any]'
    valid_conversions: List[str]

    def elements_to_bytes(self, n_elements: int) -> int:
        return n_elements * self.dtype.itemsize

@dataclass(frozen=True)
class UnquantizedDataType(DataType):
    pass

DT_F16  = UnquantizedDataType('F16',  dtype = np.dtype(np.float16), valid_conversions = ['F32', 'Q8_0'])
DT_F32  = UnquantizedDataType('F32',  dtype = np.dtype(np.float32), valid_conversions = ['F16', 'Q8_0'])
DT_I32  = UnquantizedDataType('I32',  dtype = np.dtype(np.int16),   valid_conversions = [])
DT_BF16 = UnquantizedDataType('BF16', dtype = np.dtype(np.uint16),  valid_conversions = ['F32', 'F16', 'Q8_0'])

@dataclass(frozen=True)
class QuantizedDataType(DataType):
    block_size: int
    quantized_dtype: 'np.dtype[Any]'
    ggml_type: gguf.GGMLQuantizationType

    def quantize(self, arr: NDArray) -> NDArray:
        raise NotImplementedError(f'Quantization for {self.name} not implemented')

    def elements_to_bytes(self, n_elements: int) -> int:
        assert n_elements % self.block_size == 0, f'Invalid number of elements {n_elements} for {self.name} with block size {self.block_size}'
        return self.quantized_dtype.itemsize * (n_elements // self.block_size)

@dataclass(frozen=True)
class Q8_0QuantizedDataType(QuantizedDataType):
    # Mini Q8_0 quantization in Python!
    def quantize(self, arr: NDArray) -> NDArray:
        assert arr.size % self.block_size == 0 and arr.size != 0, f'Bad array size {arr.size}'
        assert arr.dtype == np.float32, f'Bad array type {arr.dtype}'
        n_blocks = arr.size // self.block_size
        blocks = arr.reshape((n_blocks, self.block_size))
        # Much faster implementation of block quantization contributed by @Cebtenzzre
        def quantize_blocks_q8_0(blocks: NDArray) -> Iterable[Tuple[Any, Any]]:
            d = abs(blocks).max(axis = 1) / np.float32(127)
            with np.errstate(divide = 'ignore'):
                qs = (blocks / d[:, None]).round()
            qs[d == 0] = 0
            yield from zip(d, qs)
        return np.fromiter(quantize_blocks_q8_0(blocks), count = n_blocks, dtype = self.quantized_dtype)

DT_Q8_0 = Q8_0QuantizedDataType('Q8_0',
                                dtype = np.dtype(np.float32), valid_conversions = [],
                                ggml_type = gguf.GGMLQuantizationType.Q8_0, block_size = 32,
                                quantized_dtype = np.dtype([('d', '<f2'), ('qs', 'i1', (32,))]))
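
# Worked example of the Q8_0 layout above (illustrative numbers): each block of
# 32 float32 values is stored as one little-endian fp16 scale 'd' plus 32 int8
# quants 'qs', with d = max(|x|) / 127 and qs[i] = round(x[i] / d). A block
# whose largest magnitude is 2.54 gets d = 0.02, so a value of 1.27 in that
# block is stored as the int8 quant 64 (1.27 / 0.02 = 63.5, rounded to even).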

# Quantized types skipped here because they may also map to np.float32
NUMPY_TYPE_TO_DATA_TYPE: Dict['np.dtype[Any]', DataType] = {}
for dt in (DT_BF16, DT_F16, DT_F32, DT_I32):
    if dt.dtype in NUMPY_TYPE_TO_DATA_TYPE:
        raise ValueError(f'Invalid duplicate data type {dt}')
    NUMPY_TYPE_TO_DATA_TYPE[dt.dtype] = dt

SAFETENSORS_DATA_TYPES: Dict[str, DataType] = {
    'BF16': DT_BF16,
    'F16': DT_F16,
    'F32': DT_F32,
    'I32': DT_I32,
}

# TODO: match this with `llama_ftype`
# TODO: rename to LLAMAFileType
# TODO: move to `gguf.py`
class GGMLFileType(enum.IntEnum):
    AllF32     = 0
    MostlyF16  = 1  # except 1d tensors
    MostlyQ8_0 = 7  # except 1d tensors

    def type_for_tensor(self, name: str, tensor: 'LazyTensor') -> DataType:
        dt = GGML_FILE_TYPE_TO_DATA_TYPE.get(self)
        if dt is None:
            raise ValueError(self)
        # 1D tensors are always F32.
        return dt if len(tensor.shape) > 1 else DT_F32

GGML_FILE_TYPE_TO_DATA_TYPE: Dict[GGMLFileType, DataType] = {
    GGMLFileType.AllF32    : DT_F32,
    GGMLFileType.MostlyF16 : DT_F16,
    GGMLFileType.MostlyQ8_0: DT_Q8_0,
}

#
# hparams loading
#

@dataclass
class Params:
    n_vocab:    int
    n_embd:     int
    n_mult:     int
    n_layer:    int
    n_ctx:      int
    n_ff:       int
    n_head:     int
    n_head_kv:  int
    f_norm_eps: float

    f_rope_freq_base: Optional[float] = None
    f_rope_scale:     Optional[float] = None

    ftype: Optional[GGMLFileType] = None

    # path to the directory containing the model files
    path_model: Optional['Path'] = None

    @staticmethod
    def find_n_mult(n_ff: int, n_embd: int) -> int:
        # hardcoded magic range
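        # LLaMA sizes its FFN as n_ff = n_mult * ceil(((8 * n_embd) // 3) / n_mult)
        # (the same formula used in guessed() below), so this scans n_mult from
        # 8192 down to 2 and returns the first multiple that reproduces n_ff.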
        for n_mult in range(8192, 1, -1):
            calc_ff = (((8*n_embd) // 3 + n_mult - 1) // n_mult)*n_mult
            if calc_ff == n_ff:
                return n_mult
        raise Exception(f"failed to find n_mult for (n_ff={n_ff}, n_embd={n_embd}).")

    @staticmethod
    def guessed(model: 'LazyModel') -> 'Params':
        # try transformer naming first
        n_vocab, n_embd = model["model.embed_tokens.weight"].shape if "model.embed_tokens.weight" in model else model["tok_embeddings.weight"].shape

        # try transformer naming first
        if "model.layers.0.self_attn.q_proj.weight" in model:
            n_layer = next(i for i in itertools.count() if f"model.layers.{i}.self_attn.q_proj.weight" not in model)
        elif "model.layers.0.self_attn.W_pack.weight" in model:  # next: try baichuan naming
            n_layer = next(i for i in itertools.count() if f"model.layers.{i}.self_attn.W_pack.weight" not in model)
        else:
            n_layer = next(i for i in itertools.count() if f"layers.{i}.attention.wq.weight" not in model)

        if n_layer < 1:
            raise Exception("failed to guess 'n_layer'. This model is unknown or unsupported.\n"
                            "Suggestion: provide 'config.json' of the model in the same directory containing model files.")

        n_head = n_embd // 128  # guessed
        n_mult = 256            # guessed

        # TODO: verify this
        n_ff = int(2 * (4 * n_embd) / 3)
        n_ff = n_mult * ((n_ff + n_mult - 1) // n_mult)

        return Params(
            n_vocab    = n_vocab,
            n_embd     = n_embd,
            n_mult     = n_mult,
            n_layer    = n_layer,
            n_ctx      = -1,
            n_ff       = n_ff,
            n_head     = n_head,
            n_head_kv  = n_head,
            f_norm_eps = 1e-5,
        )

    @staticmethod
    def loadHFTransformerJson(model: 'LazyModel', config_path: 'Path') -> 'Params':
        config = json.load(open(config_path))

        n_vocab          = config["vocab_size"]
        n_embd           = config["hidden_size"]
        n_layer          = config["num_hidden_layers"]
        n_ff             = config["intermediate_size"]
        n_head           = config["num_attention_heads"]
        n_head_kv        = config["num_key_value_heads"] if "num_key_value_heads" in config else n_head
        f_norm_eps       = config["rms_norm_eps"]
        f_rope_freq_base = config["rope_theta"] if "rope_theta" in config else None

        rope_scaling = config.get("rope_scaling")
        if isinstance(rope_scaling, dict) and rope_scaling.get("type") == "linear":
            f_rope_scale = config["rope_scaling"].get("factor")
        else:
            f_rope_scale = None

        n_mult = Params.find_n_mult(n_ff, n_embd)

        if "max_sequence_length" in config:
            n_ctx = config["max_sequence_length"]
        elif "max_position_embeddings" in config:
            n_ctx = config["max_position_embeddings"]
        else:
            raise Exception("failed to guess 'n_ctx'. This model is unknown or unsupported.\n"
                            "Suggestion: provide 'config.json' of the model in the same directory containing model files.")

        return Params(
            n_vocab          = n_vocab,
            n_embd           = n_embd,
            n_mult           = n_mult,
            n_layer          = n_layer,
            n_ctx            = n_ctx,
            n_ff             = n_ff,
            n_head           = n_head,
            n_head_kv        = n_head_kv,
            f_norm_eps       = f_norm_eps,
            f_rope_freq_base = f_rope_freq_base,
            f_rope_scale     = f_rope_scale,
        )
    # LLaMA v2 70B params.json
    # {"dim": 8192, "multiple_of": 4096, "ffn_dim_multiplier": 1.3, "n_heads": 64, "n_kv_heads": 8, "n_layers": 80, "norm_eps": 1e-05, "vocab_size": -1}
    @staticmethod
    def loadOriginalParamsJson(model: 'LazyModel', config_path: 'Path') -> 'Params':
        config = json.load(open(config_path))

        n_vocab          = config["vocab_size"] if "vocab_size" in config else -1
        n_embd           = config["dim"]
        n_layer          = config["n_layers"]
        n_mult           = config["multiple_of"]
        n_ff             = -1
        n_head           = config["n_heads"]
        n_head_kv        = config["n_kv_heads"] if "n_kv_heads" in config else n_head
        f_norm_eps       = config["norm_eps"]
        f_rope_freq_base = config["rope_theta"] if "rope_theta" in config else None

        # hack to determine LLaMA v1 vs v2 vs CodeLlama
        if f_rope_freq_base and f_rope_freq_base == 1000000:
            # CodeLlama
            n_ctx = 16384
        elif config["norm_eps"] == 1e-05:
            # LLaMA v2
            n_ctx = 4096
        else:
            # LLaMA v1
            n_ctx = 2048

        if n_vocab == -1:
            n_vocab = model["tok_embeddings.weight"].shape[0]

        if n_ff == -1:
            n_ff = model["layers.0.feed_forward.w1.weight"].shape[0]

        return Params(
            n_vocab          = n_vocab,
            n_embd           = n_embd,
            n_mult           = n_mult,
            n_layer          = n_layer,
            n_ctx            = n_ctx,
            n_ff             = n_ff,
            n_head           = n_head,
            n_head_kv        = n_head_kv,
            f_norm_eps       = f_norm_eps,
            f_rope_freq_base = f_rope_freq_base,
        )

    @staticmethod
    def load(model_plus: 'ModelPlus') -> 'Params':
        hf_config_path   = model_plus.paths[0].parent / "config.json"
        orig_config_path = model_plus.paths[0].parent / "params.json"

        if hf_config_path.exists():
            params = Params.loadHFTransformerJson(model_plus.model, hf_config_path)
        elif orig_config_path.exists():
            params = Params.loadOriginalParamsJson(model_plus.model, orig_config_path)
        else:
            params = Params.guessed(model_plus.model)

        params.path_model = model_plus.paths[0].parent

        return params

#
# vocab
#

class BpeVocab:
    def __init__(self, fname_tokenizer: Path, fname_added_tokens: Optional[Path]) -> None:
        self.bpe_tokenizer = json.loads(open(str(fname_tokenizer), encoding="utf-8").read())
        added_tokens: Dict[str, int]
        if fname_added_tokens is not None:
            added_tokens = json.load(open(fname_added_tokens, encoding="utf-8"))
        else:
            added_tokens = {}

        vocab_size: int = len(self.bpe_tokenizer)
        expected_ids = list(range(vocab_size, vocab_size + len(added_tokens)))
        actual_ids   = sorted(added_tokens.values())
        if expected_ids != actual_ids:
            raise Exception(f"Expected added token IDs to be sequential and start at {vocab_size}; got {actual_ids}")
        items = sorted(added_tokens.items(), key=lambda text_idx: text_idx[1])
        self.added_tokens_list  = [text for (text, idx) in items]
        self.vocab_size_base: int = vocab_size
        self.vocab_size: int      = self.vocab_size_base + len(self.added_tokens_list)
        self.fname_tokenizer    = fname_tokenizer
        self.fname_added_tokens = fname_added_tokens

    def bpe_tokens(self) -> Iterable[Tuple[bytes, float, gguf.TokenType]]:
        tokenizer = self.bpe_tokenizer
        from transformers.models.gpt2 import tokenization_gpt2
        byte_encoder = tokenization_gpt2.bytes_to_unicode()
        byte_decoder = {v: k for k, v in byte_encoder.items()}
        for i, item in enumerate(tokenizer):
            text: bytes = item.encode("utf-8")
            score: float = -i
            yield text, score, gguf.TokenType.USER_DEFINED

    def added_tokens(self) -> Iterable[Tuple[bytes, float, gguf.TokenType]]:
        for text in self.added_tokens_list:
            score = -1000.0
            yield text.encode("utf-8"), score, gguf.TokenType.USER_DEFINED

    def all_tokens(self) -> Iterable[Tuple[bytes, float, gguf.TokenType]]:
        yield from self.bpe_tokens()
        yield from self.added_tokens()

    def __repr__(self) -> str:
        return f"<BpeVocab with {self.vocab_size_base} base tokens and {len(self.added_tokens_list)} added tokens>"

class SentencePieceVocab:
    def __init__(self, fname_tokenizer: Path, fname_added_tokens: Optional[Path]) -> None:
        self.sentencepiece_tokenizer = SentencePieceProcessor(str(fname_tokenizer))
        added_tokens: Dict[str, int]
        if fname_added_tokens is not None:
            added_tokens = json.load(open(fname_added_tokens, encoding="utf-8"))
        else:
            added_tokens = {}

        vocab_size: int = self.sentencepiece_tokenizer.vocab_size()
        expected_ids = list(range(vocab_size, vocab_size + len(added_tokens)))
        actual_ids   = sorted(added_tokens.values())
        if expected_ids != actual_ids:
            raise Exception(f"Expected added token IDs to be sequential and start at {vocab_size}; got {actual_ids}")
        items = sorted(added_tokens.items(), key=lambda text_idx: text_idx[1])
        self.added_tokens_list  = [text for (text, idx) in items]
        self.vocab_size_base: int = vocab_size
        self.vocab_size: int      = self.vocab_size_base + len(self.added_tokens_list)
        self.fname_tokenizer    = fname_tokenizer
        self.fname_added_tokens = fname_added_tokens

    def sentencepiece_tokens(self) -> Iterable[Tuple[bytes, float, gguf.TokenType]]:
        tokenizer = self.sentencepiece_tokenizer
        for i in range(tokenizer.vocab_size()):
            piece = tokenizer.id_to_piece(i)
            text: bytes = piece.encode("utf-8")
            score: float = tokenizer.get_score(i)

            toktype = gguf.TokenType.NORMAL
            if tokenizer.is_unknown(i):
                toktype = gguf.TokenType.UNKNOWN
            if tokenizer.is_control(i):
                toktype = gguf.TokenType.CONTROL

            # NOTE: I think added_tokens are user defined.
            # ref: https://github.com/google/sentencepiece/blob/master/src/sentencepiece_model.proto
            # if tokenizer.is_user_defined(i): toktype = gguf.TokenType.USER_DEFINED

            if tokenizer.is_unused(i):
                toktype = gguf.TokenType.UNUSED
            if tokenizer.is_byte(i):
                toktype = gguf.TokenType.BYTE

            yield text, score, toktype

    def added_tokens(self) -> Iterable[Tuple[bytes, float, gguf.TokenType]]:
        for text in self.added_tokens_list:
            score = -1000.0
            yield text.encode("utf-8"), score, gguf.TokenType.USER_DEFINED

    def all_tokens(self) -> Iterable[Tuple[bytes, float, gguf.TokenType]]:
        yield from self.sentencepiece_tokens()
        yield from self.added_tokens()

    def __repr__(self) -> str:
        return f"<SentencePieceVocab with {self.vocab_size_base} base tokens and {len(self.added_tokens_list)} added tokens>"

Vocab = Union[BpeVocab, SentencePieceVocab]

#
# data loading
# TODO: reuse (probably move to gguf.py?)
#

def permute(weights: NDArray, n_head: int, n_head_kv: int) -> NDArray:
    #print( "permute debug " + str(weights.shape[0]) + " x " + str(weights.shape[1]) + " nhead " + str(n_head) + " nheadkv " + str(n_head_kv) )
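    # HF checkpoints store each attention head's rows with the two rotary
    # half-dimensions laid out contiguously; this reshape/swapaxes re-interleaves
    # them into the adjacent-pair ordering that llama.cpp's RoPE expects.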
    if n_head_kv is not None and n_head != n_head_kv:
        n_head //= n_head_kv
    return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
                   .swapaxes(1, 2)
                   .reshape(weights.shape))

class Tensor(metaclass=ABCMeta):
    data_type: DataType

    @abstractmethod
    def astype(self, data_type: DataType) -> 'Tensor': ...
    @abstractmethod
    def permute(self, n_head: int, n_head_kv: int) -> 'Tensor': ...
    @abstractmethod
    def permute_part(self, n_part: int, n_head: int, n_head_kv: int) -> 'UnquantizedTensor': ...
    @abstractmethod
    def part(self, n_part: int) -> 'UnquantizedTensor': ...
    @abstractmethod
    def to_ggml(self) -> 'GGMLCompatibleTensor': ...

def bf16_to_fp32(bf16_arr: np.ndarray) -> np.ndarray:
    assert bf16_arr.dtype == np.uint16, f"Input array should be of dtype uint16, but got {bf16_arr.dtype}"
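    # bf16 is the upper half of an fp32 bit pattern (same sign and exponent,
    # truncated mantissa), so widening to uint32 and shifting left by 16 yields
    # the exact float32. E.g. bf16 0x3F80 becomes 0x3F800000, which is 1.0.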
    fp32_arr = bf16_arr.astype(np.uint32) << 16
    return fp32_arr.view(np.float32)

class UnquantizedTensor(Tensor):
    def __init__(self, ndarray: NDArray) -> None:
        assert isinstance(ndarray, np.ndarray)
        self.ndarray = ndarray
        self.data_type = NUMPY_TYPE_TO_DATA_TYPE[ndarray.dtype]

    def astype(self, data_type: DataType) -> Tensor:
        dtype = data_type.dtype
        if self.data_type == DT_BF16:
            self.ndarray = bf16_to_fp32(self.ndarray)
        return UnquantizedTensor(self.ndarray.astype(dtype))

    def to_ggml(self) -> 'UnquantizedTensor':
        return self
    def permute_part(self, n_part: int, n_head: int, n_head_kv: int) -> 'UnquantizedTensor':
        r = self.ndarray.shape[0] // 3
        return UnquantizedTensor(permute(self.ndarray[r * n_part : r * n_part + r, ...], n_head, n_head_kv))
    def part(self, n_part: int) -> 'UnquantizedTensor':
        r = self.ndarray.shape[0] // 3
        return UnquantizedTensor(self.ndarray[r * n_part : r * n_part + r, ...])

    def permute(self, n_head: int, n_head_kv: int) -> 'UnquantizedTensor':
        return UnquantizedTensor(permute(self.ndarray, n_head, n_head_kv))

def load_unquantized(lazy_tensor: 'LazyTensor', expected_dtype: Any = None, convert: bool = False) -> NDArray:
    tensor = lazy_tensor.load()
    assert isinstance(tensor, UnquantizedTensor)

    # double-check:
    actual_shape = list(tensor.ndarray.shape)
    assert actual_shape == lazy_tensor.shape, (actual_shape, lazy_tensor.shape)
    if expected_dtype is not None and expected_dtype != tensor.ndarray.dtype:
        if convert:
            tensor.ndarray = tensor.ndarray.astype(expected_dtype)
        else:
            raise ValueError(f'expected this tensor to have dtype {expected_dtype}, got {tensor.ndarray.dtype}')

    return tensor.ndarray

GGMLCompatibleTensor = Union[UnquantizedTensor]

@dataclass
class LazyTensor:
    _load: Callable[[], Tensor]
    shape: List[int]
    data_type: DataType
    description: str

    def load(self) -> Tensor:
        ret = self._load()
        # Should be okay if it maps to the same numpy type?
        assert ret.data_type == self.data_type or (self.data_type.dtype == ret.data_type.dtype), \
            (self.data_type, ret.data_type, self.description)
        return ret

    def astype(self, data_type: DataType) -> 'LazyTensor':
        self.validate_conversion_to(data_type)

        def load() -> Tensor:
            return self.load().astype(data_type)
        return LazyTensor(load, self.shape, data_type, f'convert({data_type}) {self.description}')

    def validate_conversion_to(self, data_type: DataType) -> None:
        if data_type != self.data_type and data_type.name not in self.data_type.valid_conversions:
            raise ValueError(f'Cannot validate conversion from {self.data_type} to {data_type}.')

LazyModel = Dict[str, LazyTensor]

@dataclass
class ModelPlus:
    model: LazyModel
    paths: List[Path]  # Where this was read from.
    format: Literal['ggml', 'torch', 'safetensors']
    vocab: Optional[Vocab]  # For GGML models (which have vocab built in), the vocab.

def merge_sharded(models: List[LazyModel]) -> LazyModel:
    # Original LLaMA models have each file contain one part of each tensor.
    # Use a dict instead of a set to preserve order.
    names = {name: None for model in models for name in model}

    def convert(name: str) -> LazyTensor:
        lazy_tensors: List[LazyTensor] = [model[name] for model in models]
        if len(lazy_tensors) == 1:
            # only one file; don't go through this procedure since there might
            # be quantized tensors
            return lazy_tensors[0]
        if len(lazy_tensors[0].shape) == 1:
            # the tensor is just duplicated in every file
            return lazy_tensors[0]
        if name.startswith('tok_embeddings.') or \
           name.endswith('.attention.wo.weight') or \
           name.endswith('.feed_forward.w2.weight'):
            # split by columns
            axis = 1
        else:
            # split by rows
            axis = 0
        concatenated_shape = list(lazy_tensors[0].shape)
        concatenated_shape[axis] = sum(tensor.shape[axis] for tensor in lazy_tensors)

        def load() -> UnquantizedTensor:
            ndarrays = [load_unquantized(tensor) for tensor in lazy_tensors]
            concatenated: NDArray = np.concatenate(ndarrays, axis=axis)
            return UnquantizedTensor(concatenated)
        description = 'concatenated[[' + '] | ['.join(lt.description for lt in lazy_tensors) + ']]'
        return LazyTensor(load, concatenated_shape, lazy_tensors[0].data_type, description)
    return {name: convert(name) for name in names}

def merge_multifile_models(models_plus: List[ModelPlus]) -> ModelPlus:
    formats = set(mp.format for mp in models_plus)
    assert len(formats) == 1, "different formats?"
    format = formats.pop()
    paths = [path for mp in models_plus for path in mp.paths]

    # Use the first non-None vocab, if any.
    try:
        vocab = next(mp.vocab for mp in models_plus if mp.vocab is not None)
    except StopIteration:
        vocab = None

    if any("model.embed_tokens.weight" in mp.model for mp in models_plus):
        # Transformers models put different tensors in different files, but
        # don't split individual tensors between files.
        model: LazyModel = {}
        for mp in models_plus:
            model.update(mp.model)
    else:
        model = merge_sharded([mp.model for mp in models_plus])

    return ModelPlus(model, paths, format, vocab)

def permute_lazy(lazy_tensor: LazyTensor, n_head: int, n_head_kv: int) -> LazyTensor:
    def load() -> Tensor:
        return lazy_tensor.load().permute(n_head, n_head_kv)
    return LazyTensor(load, lazy_tensor.shape, lazy_tensor.data_type, f'permute({n_head}, {n_head_kv}) ' + lazy_tensor.description)

def permute_part_lazy(lazy_tensor: LazyTensor, n_part: int, n_head: int, n_head_kv: int) -> LazyTensor:
    def load() -> Tensor:
        return lazy_tensor.load().permute_part(n_part, n_head, n_head_kv)
    s = lazy_tensor.shape.copy()
    s[0] = s[0] // 3
    return LazyTensor(load, s, lazy_tensor.data_type, f'permute({n_head}, {n_head_kv}) ' + lazy_tensor.description)

def part_lazy(lazy_tensor: LazyTensor, n_part: int) -> LazyTensor:
    def load() -> Tensor:
        return lazy_tensor.load().part(n_part)
    s = lazy_tensor.shape.copy()
    s[0] = s[0] // 3
    return LazyTensor(load, s, lazy_tensor.data_type, 'part ' + lazy_tensor.description)

# Functionality that simulates `torch.load` but where individual tensors are
# only loaded into memory on demand, not all at once.
# PyTorch can't do this natively as of time of writing:
# - https://github.com/pytorch/pytorch/issues/64327
# This allows us to de-shard without multiplying RAM usage, and also
# conveniently drops the PyTorch dependency (though we still need numpy).

@dataclass
class LazyStorageKind:
    data_type: DataType

@dataclass
class LazyStorage:
    load: Callable[[int, int], NDArray]
    kind: LazyStorageKind
    description: str

class LazyUnpickler(pickle.Unpickler):
    def __init__(self, fp: IO[bytes], data_base_path: str, zip_file: zipfile.ZipFile):
        super().__init__(fp)
        self.data_base_path = data_base_path
        self.zip_file = zip_file

    def persistent_load(self, pid: Any) -> Any:
        assert pid[0] == 'storage'
        assert isinstance(pid[1], LazyStorageKind)
        data_type = pid[1].data_type
        filename_stem = pid[2]
        filename = self.data_base_path + '/' + filename_stem
        info = self.zip_file.getinfo(filename)

        def load(offset: int, elm_count: int) -> NDArray:
            dtype = data_type.dtype
            fp = self.zip_file.open(info)
            fp.seek(offset * dtype.itemsize)
            size = elm_count * dtype.itemsize
            data = fp.read(size)
            assert len(data) == size
            return np.frombuffer(data, dtype)
        description = f'storage data_type={data_type} path-in-zip={filename} path={self.zip_file.filename}'
        return LazyStorage(load=load, kind=pid[1], description=description)

    # @staticmethod
    def lazy_rebuild_tensor_v2(storage: Any, storage_offset: Any, size: Any, stride: Any,
                               # pyright: ignore[reportSelfClsParameterName]
                               requires_grad: Any, backward_hooks: Any, metadata: Any = None) -> LazyTensor:
        assert isinstance(storage, LazyStorage)

        def load() -> UnquantizedTensor:
            elm_count = stride[0] * size[0]
            return UnquantizedTensor(storage.load(storage_offset, elm_count).reshape(size))
        description = f'pickled storage_offset={storage_offset} in {storage.description}'
        return LazyTensor(load, list(size), storage.kind.data_type, description)

    # @staticmethod
    def rebuild_from_type_v2(func, new_type, args, state):
        return func(*args)

    CLASSES: Dict[Any, Any] = {
        ('torch._tensor', '_rebuild_from_type_v2'): rebuild_from_type_v2,
        ('torch._utils', '_rebuild_tensor_v2'): lazy_rebuild_tensor_v2,
        ('torch', 'BFloat16Storage'): LazyStorageKind(DT_BF16),
        ('torch', 'HalfStorage'): LazyStorageKind(DT_F16),
        ('torch', 'FloatStorage'): LazyStorageKind(DT_F32),
        ('torch', 'IntStorage'): LazyStorageKind(DT_I32),
        ('torch', 'Tensor'): LazyTensor,
    }

    def find_class(self, module: str, name: str) -> Any:
        if not module.startswith('torch'):
            return super().find_class(module, name)
        return self.CLASSES[(module, name)]

def lazy_load_torch_file(outer_fp: IO[bytes], path: Path) -> ModelPlus:
    zf = zipfile.ZipFile(outer_fp)
    pickle_paths = [name for name in zf.namelist() if name.endswith('.pkl')]
    assert len(pickle_paths) == 1, pickle_paths
    pickle_fp = zf.open(pickle_paths[0], 'r')
    unpickler = LazyUnpickler(pickle_fp,
                              data_base_path=pickle_paths[0][:-4],
                              zip_file=zf)
    model = unpickler.load()
    as_dict = dict(model.items())
    return ModelPlus(model=as_dict, paths=[path], format='torch', vocab=None)

def lazy_load_safetensors_file(fp: IO[bytes], path: Path) -> ModelPlus:
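    # safetensors layout: an 8-byte little-endian header size, a JSON header
    # describing each tensor's dtype/shape/data_offsets, then the raw tensor
    # bytes; data_offsets are relative to the start of that trailing byte buffer.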
    header_size, = struct.unpack('<Q', fp.read(8))
    header: Dict[str, Dict[str, Any]] = json.loads(fp.read(header_size))
    # Use mmap for the actual data to avoid race conditions with the file offset.
    mapped = memoryview(mmap.mmap(fp.fileno(), 0, access=mmap.ACCESS_READ))
    byte_buf = mapped[8 + header_size:]

    def convert(info: Dict[str, Any]) -> LazyTensor:
        data_type = SAFETENSORS_DATA_TYPES[info['dtype']]
        numpy_dtype = data_type.dtype
        shape: List[int] = info['shape']
        begin, end = info['data_offsets']
        assert 0 <= begin <= end <= len(byte_buf)
        assert end - begin == math.prod(shape) * numpy_dtype.itemsize
        buf = byte_buf[begin:end]

        def load() -> UnquantizedTensor:
            return UnquantizedTensor(np.frombuffer(buf, dtype=numpy_dtype).reshape(shape))
        description = f'safetensors begin={begin} end={end} type={data_type} path={path}'
        return LazyTensor(load, shape, data_type, description)
    model = {name: convert(info) for (name, info) in header.items() if name != '__metadata__'}
    return ModelPlus(model=model, paths=[path], format='safetensors', vocab=None)

def must_read(fp: IO[bytes], length: int) -> bytes:
    ret = fp.read(length)
    if len(ret) < length:
        raise Exception("unexpectedly reached end of file")
    return ret

@functools.lru_cache(maxsize=None)
def lazy_load_file(path: Path) -> ModelPlus:
    fp = open(path, 'rb')
    first8 = fp.read(8)
    fp.seek(0)
    if first8[:2] == b'PK':
        # A zip file, i.e. PyTorch format
        return lazy_load_torch_file(fp, path)
    elif struct.unpack('<Q', first8)[0] < 16 * 1024 * 1024:
        # Probably safetensors
        return lazy_load_safetensors_file(fp, path)
    else:
        raise ValueError(f"unknown format: {path}")

In = TypeVar('In')
Out = TypeVar('Out')

def bounded_parallel_map(func: Callable[[In], Out], iterable: Iterable[In], concurrency: int, max_workers: Optional[int] = None, factory: Callable = ThreadPoolExecutor) -> Iterable[Out]:
    '''Parallel map, but with backpressure. If the caller doesn't call `next`
    fast enough, this will stop calling `func` at some point rather than
    letting results pile up in memory. Specifically, there is a max of one
    output value buffered per thread.'''
    if concurrency < 2:
        yield from map(func, iterable)
        return
    iterable = iter(iterable)
    with factory(max_workers = max_workers) as executor:
        futures: List[concurrent.futures.Future[Out]] = []
        done = False
        for _ in range(concurrency):
            try:
                futures.append(executor.submit(func, next(iterable)))
            except StopIteration:
                done = True
                break

        while futures:
            result = futures.pop(0).result()
            while not done and len(futures) < concurrency:
                try:
                    futures.append(executor.submit(func, next(iterable)))
                except StopIteration:
                    done = True
                    break
            yield result

def check_vocab_size(params: Params, vocab: Vocab) -> None:
    if params.n_vocab != vocab.vocab_size:
        assert isinstance(vocab, BpeVocab) or isinstance(vocab, SentencePieceVocab)
        if params.n_vocab == vocab.vocab_size_base:
            print("Ignoring added_tokens.json since model matches vocab size without it.")
            vocab.added_tokens_list = []
            vocab.vocab_size = vocab.vocab_size_base
            return
        msg = f"Vocab size mismatch (model has {params.n_vocab}, but {vocab.fname_tokenizer}"
        if vocab.fname_added_tokens is not None:
            msg += f" combined with {vocab.fname_added_tokens}"
        msg += f" has {vocab.vocab_size})."
        if vocab.vocab_size < params.n_vocab < vocab.vocab_size + 20 and vocab.fname_added_tokens is None:
            msg += f" Most likely you are missing added_tokens.json (should be in {vocab.fname_tokenizer.parent})."
        raise Exception(msg)

class OutputFile:
    def __init__(self, fname_out: Path) -> None:
        self.gguf = gguf.GGUFWriter(fname_out, gguf.MODEL_ARCH_NAMES[ARCH])

    def add_meta_arch(self, params: Params) -> None:
        name = "LLaMA"

        if params.n_ctx == 4096:
            name = "LLaMA v2"
        if params.path_model:
            name = str(params.path_model.parent).split('/')[-1]

        self.gguf.add_name                (name)
        self.gguf.add_context_length      (params.n_ctx)
        self.gguf.add_embedding_length    (params.n_embd)
        self.gguf.add_block_count         (params.n_layer)
        self.gguf.add_feed_forward_length (params.n_ff)
        self.gguf.add_rope_dimension_count(params.n_embd // params.n_head)
        self.gguf.add_head_count          (params.n_head)
        self.gguf.add_head_count_kv       (params.n_head_kv)
        self.gguf.add_layer_norm_rms_eps  (params.f_norm_eps)

        if params.f_rope_freq_base:
            self.gguf.add_rope_freq_base(params.f_rope_freq_base)

        if params.f_rope_scale:
            self.gguf.add_rope_scale_linear(params.f_rope_scale)

        if params.ftype:
            self.gguf.add_file_type(params.ftype)

    def add_meta_vocab(self, vocab: Vocab) -> None:
        tokens = []
        scores = []
        toktypes = []
        # NOTE: `all_tokens` returns the base vocabulary and added tokens
        # TODO: add special tokens?
        for text, score, toktype in vocab.all_tokens():
            tokens.append(text)
            scores.append(score)
            toktypes.append(toktype)

        self.gguf.add_tokenizer_model("llama")
        self.gguf.add_token_list(tokens)
        self.gguf.add_token_scores(scores)
        self.gguf.add_token_types(toktypes)

    def add_tensor_info(self, name: str, tensor: LazyTensor) -> None:
        n_elements = int(np.prod(tensor.shape))
        raw_dtype = getattr(tensor.data_type, 'ggml_type', None)
        data_type = getattr(tensor.data_type, 'quantized_dtype', None) or tensor.data_type.dtype
        data_nbytes = tensor.data_type.elements_to_bytes(n_elements)
        self.gguf.add_tensor_info(name, tensor.shape, data_type, data_nbytes, raw_dtype = raw_dtype)

    def write_meta(self) -> None:
        self.gguf.write_header_to_file()
        self.gguf.write_kv_data_to_file()

    def write_tensor_info(self) -> None:
        self.gguf.write_ti_data_to_file()

    def close(self) -> None:
        self.gguf.close()

    @staticmethod
    def write_vocab_only(fname_out: Path, params: Params, vocab: Vocab) -> None:
        check_vocab_size(params, vocab)

        of = OutputFile(fname_out)

        # meta data
        of.add_meta_arch(params)
        of.add_meta_vocab(vocab)
        of.write_meta()

        of.close()

    @staticmethod
    def do_item(item: Tuple[str, LazyTensor]) -> Tuple[DataType, NDArray]:
        name, lazy_tensor = item
        tensor = lazy_tensor.load().to_ggml()
        return (lazy_tensor.data_type, tensor.ndarray)

    @staticmethod
    def maybe_do_quantize(item: Tuple[DataType, NDArray]) -> NDArray:
        dt, arr = item
        if not isinstance(dt, QuantizedDataType):
            return arr
        return dt.quantize(arr)

    @staticmethod
    def write_all(fname_out: Path, ftype: GGMLFileType, params: Params, model: LazyModel, vocab: Vocab, concurrency: int = DEFAULT_CONCURRENCY) -> None:
        check_vocab_size(params, vocab)

        of = OutputFile(fname_out)

        # meta data
        of.add_meta_arch(params)
        of.add_meta_vocab(vocab)

        # tensor info
        for name, lazy_tensor in model.items():
            of.add_tensor_info(name, lazy_tensor)

        of.write_meta()
        of.write_tensor_info()

        # tensor data
        ndarrays_inner = bounded_parallel_map(OutputFile.do_item, model.items(), concurrency = concurrency)
        if ftype == GGMLFileType.MostlyQ8_0:
            ndarrays = bounded_parallel_map(OutputFile.maybe_do_quantize, ndarrays_inner, concurrency = concurrency, max_workers = concurrency, factory = ProcessPoolExecutor)
        else:
            ndarrays = map(OutputFile.maybe_do_quantize, ndarrays_inner)

        start = time.time()
        for i, ((name, lazy_tensor), ndarray) in enumerate(zip(model.items(), ndarrays)):
            elapsed = time.time() - start
            size = ' x '.join(f"{dim:6d}" for dim in lazy_tensor.shape)
            padi = len(str(len(model)))
            print(f"[{i+1:{padi}d}/{len(model)}] Writing tensor {name:38s} | size {size:16} | type {lazy_tensor.data_type.name:4} | T+{int(elapsed):4}")
            of.gguf.write_tensor_data(ndarray)

        of.close()

def pick_output_type(model: LazyModel, output_type_str: Optional[str]) -> GGMLFileType:
    wq_type = model[NAMES[gguf.MODEL_TENSOR.ATTN_Q].format(bid=0)+".weight"].data_type

    if output_type_str == "f32" or (output_type_str is None and wq_type == DT_F32):
        return GGMLFileType.AllF32
    if output_type_str == "f16" or (output_type_str is None and wq_type in (DT_F16, DT_BF16)):
        return GGMLFileType.MostlyF16
    if output_type_str == "q8_0":
        return GGMLFileType.MostlyQ8_0

    name_to_type = {name: lazy_tensor.data_type for (name, lazy_tensor) in model.items()}

    raise Exception(f"Unexpected combination of types: {name_to_type}")

def convert_to_output_type(model: LazyModel, output_type: GGMLFileType) -> LazyModel:
    return {name: tensor.astype(output_type.type_for_tensor(name, tensor))
            for (name, tensor) in model.items()}

def convert_model_names(model: LazyModel, params: Params) -> LazyModel:
    tmap = gguf.get_tensor_name_map(ARCH, params.n_layer)
    tmp = model
    # HF models permute or pack some of the tensors, so we need to undo that
    for i in itertools.count():
        if f"model.layers.{i}.self_attn.q_proj.weight" in model:
            print(f"Permuting layer {i}")
            tmp[f"model.layers.{i}.self_attn.q_proj.weight"] = permute_lazy(model[f"model.layers.{i}.self_attn.q_proj.weight"], params.n_head, params.n_head)
            tmp[f"model.layers.{i}.self_attn.k_proj.weight"] = permute_lazy(model[f"model.layers.{i}.self_attn.k_proj.weight"], params.n_head, params.n_head_kv)
            #tmp[f"model.layers.{i}.self_attn.v_proj.weight"] = model[f"model.layers.{i}.self_attn.v_proj.weight"]
        elif f"model.layers.{i}.self_attn.W_pack.weight" in model:
            print(f"Unpacking and permuting layer {i}")
            tmp[f"model.layers.{i}.self_attn.q_proj.weight"] = permute_part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 0, params.n_head, params.n_head)
            tmp[f"model.layers.{i}.self_attn.k_proj.weight"] = permute_part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 1, params.n_head, params.n_head_kv)
            tmp[f"model.layers.{i}.self_attn.v_proj.weight"] = part_lazy        (model[f"model.layers.{i}.self_attn.W_pack.weight"], 2)
        else:
            break

    out: LazyModel = {}
    for name, lazy_tensor in model.items():
        name_new = name

        if name in tmap:
            name_new = tmap[name]
        elif name.endswith(".weight") and name[:-7] in tmap:
            name_new = tmap[name[:-7]] + ".weight"
        elif name.endswith(".bias") and name[:-5] in tmap:
            name_new = tmap[name[:-5]] + ".bias"
        else:
            raise Exception(f"Unexpected tensor name: {name}")

        if gguf.should_skip_tensor_TMP(ARCH, params.n_layer, name_new):
            print(f"skipping tensor {name_new}")
            continue
        else:
            print(f"{name:48s} -> {name_new:40s} | {lazy_tensor.data_type.name:6s} | {lazy_tensor.shape}")
            out[name_new] = lazy_tensor

    return out

def nth_multifile_path(path: Path, n: int) -> Optional[Path]:
    '''Given any path belonging to a multi-file model (e.g. foo.bin.1), return
    the nth path in the model.
    '''
    # Support the following patterns:
    patterns: List[Tuple[str, str]] = [
        # - x.00.pth, x.01.pth, etc.
        (r'\.[0-9]{2}\.pth$', f'.{n:02}.pth'),
        # - x-00001-of-00002.bin, x-00002-of-00002.bin, etc.
        (r'-[0-9]{5}-of-(.*)$', fr'-{n:05}-of-\1'),
        # x.bin, x.bin.1, etc.
        (r'(\.[0-9]+)?$', r'\1' if n == 0 else fr'\1.{n}')
    ]
    for regex, replacement in patterns:
        if re.search(regex, path.name):
            new_path = path.with_name(re.sub(regex, replacement, path.name))
            if new_path.exists():
                return new_path
    return None

def find_multifile_paths(path: Path) -> List[Path]:
    '''Given any path belonging to a multi-file model (e.g. foo.bin.1), return
    the whole list of paths in the model.
    '''
    ret: List[Path] = []
    for i in itertools.count():
        nth_path = nth_multifile_path(path, i)
        if nth_path is None:
            break
        ret.append(nth_path)
    if not ret:
        # No matches. This should only happen if the file was named, e.g.,
        # foo.0, and there was no file named foo. Oh well, try to process it
        # as a single file.
        return [path]
    return ret

def load_some_model(path: Path) -> ModelPlus:
    '''Load a model of any supported format.'''
    # Be extra-friendly and accept either a file or a directory:
    if path.is_dir():
        # Check if it's a set of safetensors files first
        files = list(path.glob("model-00001-of-*.safetensors"))
        if not files:
            # Try the PyTorch patterns too, with lower priority
            globs = ["consolidated.00.pth", "pytorch_model-00001-of-*.bin", "*.pt", "pytorch_model.bin"]
            files = [file for glob in globs for file in path.glob(glob)]
        if not files:
            raise Exception(f"Can't find model in directory {path}")
        if len(files) > 1:
            raise Exception(f"Found multiple models in {path}, not sure which to pick: {files}")
        path = files[0]

    paths = find_multifile_paths(path)
    models_plus: List[ModelPlus] = []
    for path in paths:
        print(f"Loading model file {path}")
        models_plus.append(lazy_load_file(path))

    model_plus = merge_multifile_models(models_plus)
    return model_plus

def load_vocab(path: Path, vocabtype: Optional[str]) -> Union[BpeVocab, SentencePieceVocab]:
    # Be extra-friendly and accept either a file or a directory. Also, if it's
    # a directory, it might be the model directory, and tokenizer.model might
    # be in the parent of that.
    if path.is_dir():
        vocab_file = "tokenizer.model"
        if vocabtype == 'bpe':
            vocab_file = "vocab.json"
        path2 = path / vocab_file
        # Use `.parent` instead of /.. to handle the symlink case better.
        path3 = path.parent / vocab_file
        if path2.exists():
            path = path2
        elif path3.exists():
            path = path3
        else:
            raise FileNotFoundError(
                f"Could not find {vocab_file} in {path} or its parent; "
                "if it's in another directory, pass the directory as --vocab-dir")

    print(f"Loading vocab file '{path}', type '{vocabtype}'")

    added_tokens_path = path.parent / "added_tokens.json"
    if vocabtype == "bpe":
        return BpeVocab(path, added_tokens_path if added_tokens_path.exists() else None)
    elif vocabtype == "spm":
        return SentencePieceVocab(path, added_tokens_path if added_tokens_path.exists() else None)
    else:
        raise ValueError(f"Unsupported vocabulary type {vocabtype}")

def default_outfile(model_paths: List[Path], file_type: GGMLFileType) -> Path:
    namestr = {
        GGMLFileType.AllF32:     "f32",
        GGMLFileType.MostlyF16:  "f16",
        GGMLFileType.MostlyQ8_0: "q8_0",
    }[file_type]
    ret = model_paths[0].parent / f"ggml-model-{namestr}.gguf"
    if ret in model_paths:
        sys.stderr.write(
            f"Error: Default output path ({ret}) would overwrite the input. "
            "Please explicitly specify a path using --outfile.\n")
        sys.exit(1)
    return ret

def do_dump_model(model_plus: ModelPlus) -> None:
    print(f"model_plus.paths = {model_plus.paths!r}")
    print(f"model_plus.format = {model_plus.format!r}")
    print(f"model_plus.vocab = {model_plus.vocab!r}")
    for name, lazy_tensor in model_plus.model.items():
        print(f"{name}: shape={lazy_tensor.shape} type={lazy_tensor.data_type}; {lazy_tensor.description}")

def main(args_in: Optional[List[str]] = None) -> None:
    parser = argparse.ArgumentParser(description="Convert a LLaMa model to a GGML compatible file")
    parser.add_argument("--dump",        action="store_true", help="don't convert, just show what's in the model")
    parser.add_argument("--dump-single", action="store_true", help="don't convert, just show what's in a single model file")
    parser.add_argument("--vocab-only",  action="store_true", help="extract only the vocab")
    parser.add_argument("--outtype",     choices=["f32", "f16", "q8_0"], help="output format - note: q8_0 may be very slow (default: f16 or f32 based on input)")
    parser.add_argument("--vocab-dir",   type=Path, help="directory containing tokenizer.model, if separate from model file")
    parser.add_argument("--outfile",     type=Path, help="path to write to; default: based on input")
    parser.add_argument("model",         type=Path, help="directory containing model file, or model file itself (*.pth, *.pt, *.bin)")
    parser.add_argument("--vocabtype",   choices=["spm", "bpe"], help="vocab format (default: spm)", default="spm")
    parser.add_argument("--ctx",         type=int, help="model training context (default: based on input)")
    parser.add_argument("--concurrency", type=int, help=f"concurrency used for conversion (default: {DEFAULT_CONCURRENCY})", default = DEFAULT_CONCURRENCY)
    args = parser.parse_args(args_in)
    if args.dump_single:
        model_plus = lazy_load_file(args.model)
        do_dump_model(model_plus)
        return
    model_plus = load_some_model(args.model)

    params = Params.load(model_plus)
    if params.n_ctx == -1:
        if args.ctx is None:
            raise Exception("The model doesn't have a context size, and you didn't specify one with --ctx\n"
                            "Please specify one with --ctx:\n"
                            " - LLaMA v1: --ctx 2048\n"
                            " - LLaMA v2: --ctx 4096\n")
        params.n_ctx = args.ctx

    if args.outtype:
        params.ftype = {
            "f32": GGMLFileType.AllF32,
            "f16": GGMLFileType.MostlyF16,
            "q8_0": GGMLFileType.MostlyQ8_0,
        }[args.outtype]

    print(f"params = {params}")

    vocab: Vocab
    if args.vocab_only:
        vocab = load_vocab(args.vocab_dir or args.model, args.vocabtype)
        assert args.outfile, "need --outfile if using --vocab-only"
        outfile = args.outfile
        OutputFile.write_vocab_only(outfile, params, vocab)
        print(f"Wrote {outfile}")
    else:
        if args.dump:
            do_dump_model(model_plus)
            return

        if model_plus.vocab is not None and args.vocab_dir is None:
            vocab = model_plus.vocab
        else:
            vocab_dir = args.vocab_dir if args.vocab_dir else model_plus.paths[0].parent
            vocab = load_vocab(vocab_dir, args.vocabtype)

        model   = model_plus.model
        model   = convert_model_names(model, params)
        ftype   = pick_output_type(model, args.outtype)
        model   = convert_to_output_type(model, ftype)
        outfile = args.outfile or default_outfile(model_plus.paths, ftype)

        params.ftype = ftype
        print(f"Writing {outfile}, format {ftype}")

        OutputFile.write_all(outfile, ftype, params, model, vocab, concurrency = args.concurrency)
        print(f"Wrote {outfile}")

if __name__ == '__main__':
    main()