convert-falcon-hf-to-gguf.py

#!/usr/bin/env python3
# HF falcon --> gguf conversion
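#
# Example invocation (the model path is illustrative):
#   python convert-falcon-hf-to-gguf.py /path/to/falcon-model 1
# ftype 1 (f16) is the default; the output goes to ggml-model-<f32|f16>.gguf inside the
# model directory unless --outfile is given.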

from __future__ import annotations

import argparse
import contextlib
import json
import os
import struct
import sys
from pathlib import Path
from typing import Any

import numpy as np
import torch
from transformers import AutoTokenizer  # type: ignore[import]
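
# use the gguf package from the local gguf-py/ directory unless NO_LOCAL_GGUF is set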
if 'NO_LOCAL_GGUF' not in os.environ:
    sys.path.insert(1, str(Path(__file__).parent / 'gguf-py' / 'gguf'))
import gguf
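

# count the checkpoint shards in dir_model whose filenames start with the given prefix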
def count_model_parts(dir_model: Path, prefix: str) -> int:
    num_parts = 0
    for filename in os.listdir(dir_model):
        if filename.startswith(prefix):
            num_parts += 1

    if num_parts > 0:
        print("gguf: found " + str(num_parts) + " model parts")
    return num_parts


def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(description="Convert a Falcon model to a GGML compatible file")
    parser.add_argument(
        "--vocab-only", action="store_true",
        help="extract only the vocab",
    )
    parser.add_argument(
        "--outfile", type=Path,
        help="path to write to; default: based on input",
    )
    parser.add_argument(
        "model", type=Path,
        help="directory containing model file, or model file itself (*.bin)",
    )
    parser.add_argument(
        "ftype", type=int, choices=[0, 1], default=1, nargs='?',
        help="output format - use 0 for float32, 1 for float16",
    )
    return parser.parse_args()


args = parse_args()

dir_model = args.model
ftype = args.ftype
if not dir_model.is_dir():
    print(f'Error: {args.model} is not a directory', file=sys.stderr)
    sys.exit(1)

# possible tensor data types
#   ftype == 0 -> float32
#   ftype == 1 -> float16

# map from ftype to string
ftype_str = ["f32", "f16"]

if args.outfile is not None:
    fname_out = args.outfile
else:
    # output in the same directory as the model by default
    fname_out = dir_model / f'ggml-model-{ftype_str[ftype]}.gguf'
  61. print("gguf: loading model "+dir_model.name)
  62. with open(dir_model / "config.json", "r", encoding="utf-8") as f:
  63. hparams = json.load(f)
  64. if hparams["architectures"][0] not in ("RWForCausalLM", "FalconForCausalLM"):
  65. print("Model architecture not supported: " + hparams["architectures"][0])
  66. sys.exit(1)
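
# prefer safetensors shards ("model-00...") when present; otherwise fall back to
# PyTorch checkpoints ("pytorch_model...")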
# get number of model parts
num_parts = count_model_parts(dir_model, "model-00")
if num_parts:
    is_safetensors = True
    from safetensors import safe_open
else:
    is_safetensors = False
    num_parts = count_model_parts(dir_model, "pytorch_model-")

ARCH = gguf.MODEL_ARCH.FALCON
gguf_writer = gguf.GGUFWriter(fname_out, gguf.MODEL_ARCH_NAMES[ARCH])
  77. print("gguf: get model metadata")
  78. block_count = hparams.get("num_hidden_layers")
  79. if block_count is None:
  80. block_count = hparams["n_layer"] # old name
  81. n_head = hparams.get("num_attention_heads")
  82. if n_head is None:
  83. n_head = hparams["n_head"] # old name
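
# num_kv_heads / n_head_kv may be absent from older RW-style configs; the fallback of 1
# corresponds to multi-query attention (all query heads sharing a single key/value head)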
n_head_kv = hparams.get("num_kv_heads")
if n_head_kv is None:
    n_head_kv = hparams.get("n_head_kv", 1)  # old name

gguf_writer.add_name("Falcon")
gguf_writer.add_context_length(2048)  # not in config.json
gguf_writer.add_tensor_data_layout("jploski")  # qkv tensor transform
gguf_writer.add_embedding_length(hparams["hidden_size"])
gguf_writer.add_feed_forward_length(4 * hparams["hidden_size"])
gguf_writer.add_block_count(block_count)
gguf_writer.add_head_count(n_head)
gguf_writer.add_head_count_kv(n_head_kv)
gguf_writer.add_layer_norm_eps(hparams["layer_norm_epsilon"])
gguf_writer.add_file_type(ftype)

# TOKENIZATION

print("gguf: get tokenizer metadata")

tokens: list[bytearray] = []
scores: list[float] = []
toktypes: list[int] = []

# gpt2 tokenizer
gguf_writer.add_tokenizer_model("gpt2")

print("gguf: get gpt2 tokenizer vocab")

# ref: https://github.com/cmp-nct/ggllm.cpp/blob/master/falcon_convert.py
tokenizer = AutoTokenizer.from_pretrained(dir_model)

# The number of tokens in tokenizer.json can differ from the expected vocab size.
# This causes downstream issues with mismatched tensor sizes when running inference.
vocab_size = hparams.get("vocab_size", len(tokenizer.vocab))
assert max(tokenizer.vocab.values()) < vocab_size

reverse_vocab = {id: encoded_tok for encoded_tok, id in tokenizer.vocab.items()}

for i in range(vocab_size):
    tokens.append(reverse_vocab[i])
    scores.append(0.0)  # dummy
    toktypes.append(gguf.TokenType.NORMAL)
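
# the token list is written directly; SpecialVocab additionally picks up BPE merges and
# any special token ids (bos/eos/etc.) found next to the model and writes them to the GGUF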
gguf_writer.add_token_list(tokens)
gguf_writer.add_token_scores(scores)
gguf_writer.add_token_types(toktypes)

special_vocab = gguf.SpecialVocab(dir_model, load_merges=True, n_vocab=len(tokens))
special_vocab.add_to_gguf(gguf_writer)

# TENSORS

tensor_map = gguf.get_tensor_name_map(ARCH, block_count)

head_dim = hparams["hidden_size"] // n_head

# tensor info
print("gguf: get tensor metadata")
if num_parts == 0:
    part_names = iter(("pytorch_model.bin",))
elif is_safetensors:
    part_names = (
        f"model-{n:05}-of-{num_parts:05}.safetensors" for n in range(1, num_parts + 1)
    )
else:
    part_names = (
        f"pytorch_model-{n:05}-of-{num_parts:05}.bin" for n in range(1, num_parts + 1)
    )

for part_name in part_names:
    if args.vocab_only:
        break
    print("gguf: loading model part '" + part_name + "'")
    if is_safetensors:
        ctx = safe_open(dir_model / part_name, framework="pt", device="cpu")
    else:
        ctx = contextlib.nullcontext(torch.load(dir_model / part_name, map_location="cpu"))

    with ctx as model_part:
        for name in model_part.keys():
            data = model_part.get_tensor(name) if is_safetensors else model_part[name]

            old_dtype = data.dtype

            # convert any unsupported data types to float32
            if data.dtype != torch.float16 and data.dtype != torch.float32:
                data = data.to(torch.float32)

            # QKV tensor transform
            # The original query_key_value tensor contains n_head_kv "kv groups",
            # each consisting of n_head/n_head_kv query weights followed by one key
            # and one value weight (shared by all query heads in the kv group).
            # This layout makes it a big pain to work with in GGML.
            # So we rearrange them here, so that we have n_head query weights
            # followed by n_head_kv key weights followed by n_head_kv value weights,
            # in contiguous fashion.
            # ref: https://github.com/jploski/ggml/blob/falcon40b/examples/falcon/convert-hf-to-ggml.py
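            # (the fused weight has (n_head + 2 * n_head_kv) * head_dim rows; the view() below
            # regroups them into n_head_kv groups of n_head // n_head_kv query blocks plus one
            # key and one value block, head_dim rows each)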
  160. if "query_key_value" in name:
  161. qkv = data.view(n_head_kv, n_head // n_head_kv + 2, head_dim, head_dim * n_head)
  162. q = qkv[:, :-2 ].reshape(n_head * head_dim, head_dim * n_head)
  163. k = qkv[:, [-2]].reshape(n_head_kv * head_dim, head_dim * n_head)
  164. v = qkv[:, [-1]].reshape(n_head_kv * head_dim, head_dim * n_head)
  165. data = torch.cat((q,k,v)).reshape_as(data)
  166. data = data.squeeze().numpy()

            # map tensor names
            new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
            if new_name is None:
                print("Cannot map tensor '" + name + "'")
                sys.exit(1)

            n_dims = len(data.shape)
            data_dtype = data.dtype

            # if f32 desired, convert any float16 to float32
            if ftype == 0 and data_dtype == np.float16:
                data = data.astype(np.float32)

            # TODO: Why can't we use these float16 as-is? There should be no reason to store float16 as float32
            if ftype == 1 and data_dtype == np.float16 and n_dims == 1:
                data = data.astype(np.float32)

            # if f16 desired, convert any float32 2-dim weight tensors to float16
            if ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
                data = data.astype(np.float16)

            print(new_name + ", n_dims = " + str(n_dims) + ", " + str(old_dtype) + " --> " + str(data.dtype))

            gguf_writer.add_tensor(new_name, data)
  185. print("gguf: write header")
  186. gguf_writer.write_header_to_file()
  187. print("gguf: write metadata")
  188. gguf_writer.write_kv_data_to_file()
  189. if not args.vocab_only:
  190. print("gguf: write tensors")
  191. gguf_writer.write_tensors_to_file()
  192. gguf_writer.close()
  193. print(f"gguf: model successfully exported to '{fname_out}'")
  194. print("")