convert_image_encoder_to_gguf.py

import argparse
import os
import json
import re

import torch
import numpy as np
from gguf import *
from transformers import CLIPModel, CLIPProcessor, CLIPVisionModel, SiglipVisionModel

TEXT = "clip.text"
VISION = "clip.vision"


def k(raw_key: str, arch: str) -> str:
    return raw_key.format(arch=arch)


def should_skip_tensor(name: str, has_text: bool, has_vision: bool, has_llava: bool) -> bool:
    if name in (
        "logit_scale",
        "text_model.embeddings.position_ids",
        "vision_model.embeddings.position_ids",
    ):
        return True

    if has_llava and name in ["visual_projection.weight", "vision_model.post_layernorm.weight", "vision_model.post_layernorm.bias"]:
        return True

    if name.startswith("v") and not has_vision:
        return True

    if name.startswith("t") and not has_text:
        return True

    return False
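
# For example, when only the vision tower is exported (has_text == False), keys such as
# "text_model.encoder.layers.0.mlp.fc1.weight" start with "t" and are skipped, while
# "logit_scale" is skipped unconditionally.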


def get_tensor_name(name: str) -> str:
    # Standardize the transformers llava next keys for
    # image newline / mm projector with the classes in haotian-liu LLaVA
    if name == "image_newline":
        return "model.image_newline"
    if name.startswith("multi_modal_projector"):
        name = name.replace("multi_modal_projector", "mm")
        if "linear_1" in name:
            name = name.replace("linear_1", "0")
        if "linear_2" in name:
            name = name.replace("linear_2", "2")
        return name

    if "projection" in name:
        return name
    if "mm_projector" in name:
        name = name.replace("model.mm_projector", "mm")
        name = re.sub(r'mm\.mlp\.mlp', 'mm.model.mlp', name, count=1)
        name = re.sub(r'mm\.peg\.peg', 'mm.model.peg', name, count=1)
        return name

    return name.replace("text_model", "t").replace("vision_model", "v").replace("encoder.layers", "blk").replace("embeddings.", "").replace("_proj", "").replace("self_attn.", "attn_").replace("layer_norm", "ln").replace("layernorm", "ln").replace("mlp.fc1", "ffn_down").replace("mlp.fc2", "ffn_up").replace("embedding", "embd").replace("final", "post").replace("layrnorm", "ln")
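
# Illustrative mappings produced by the chained replaces above (assuming standard HF CLIP key names):
#   "vision_model.encoder.layers.0.self_attn.q_proj.weight" -> "v.blk.0.attn_q.weight"
#   "vision_model.embeddings.patch_embedding.weight"        -> "v.patch_embd.weight"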


def bytes_to_unicode():
    """
    Returns a list of utf-8 bytes and a corresponding list of unicode strings.
    The reversible bpe codes work on unicode strings.
    This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
    When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
    This is a significant percentage of your normal, say, 32K bpe vocab.
    To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
    This also avoids mapping to whitespace/control characters that the bpe code barfs on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
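
# For instance, printable ASCII maps to itself, while bytes outside the printable ranges are
# remapped above U+0100, e.g. bytes_to_unicode()[ord(" ")] == "Ġ".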


ap = argparse.ArgumentParser()
ap.add_argument("-m", "--model-dir", help="Path to model directory cloned from HF Hub", required=True)
ap.add_argument("--use-f32", action="store_true", default=False, help="Use f32 instead of f16")
ap.add_argument('--bigendian', action="store_true", default=False, help="Model is executed on big-endian machine")
ap.add_argument("--text-only", action="store_true", required=False,
                help="Save a text-only model. It can't be used to encode images")
ap.add_argument("--vision-only", action="store_true", required=False,
                help="Save a vision-only model. It can't be used to encode texts")
ap.add_argument("--clip-model-is-vision", action="store_true", required=False,
                help="The clip model is a pure vision model (ShareGPT4V vision extract for example)")

# Selectable visual encoders that are compatible with this script
encoder_group = ap.add_mutually_exclusive_group()
encoder_group.add_argument("--clip-model-is-openclip", action="store_true", required=False,
                           help="The clip model is from openclip (for ViT-SO400M type)")
encoder_group.add_argument("--clip-model-is-siglip", action="store_true", required=False,
                           help="The visual encoder is Siglip.")

ap.add_argument("--llava-projector", help="Path to llava.projector file. If specified, save an image encoder for LLaVA models.")
ap.add_argument("--projector-type", help="Type of projector. Possible values: mlp, ldp, ldpv2", choices=["mlp", "ldp", "ldpv2"], default="mlp")
ap.add_argument("-o", "--output-dir", help="Directory to save GGUF files. Default is the original model directory", default=None)

# Example --image-mean 0.48145466 0.4578275 0.40821073 --image-std 0.26862954 0.26130258 0.27577711
# Example --image-mean 0.5 0.5 0.5 --image-std 0.5 0.5 0.5
default_image_mean = [0.48145466, 0.4578275, 0.40821073]
default_image_std = [0.26862954, 0.26130258, 0.27577711]
ap.add_argument('--image-mean', type=float, nargs='+', help='Mean of the images for normalization (overrides processor)', default=None)
ap.add_argument('--image-std', type=float, nargs='+', help='Standard deviation of the images for normalization (overrides processor)', default=None)
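
# Illustrative invocation (paths below are hypothetical):
#   python convert_image_encoder_to_gguf.py -m ./clip-vit-large-patch14-336 \
#       --llava-projector ./llava.projector --output-dir ./out --projector-type mlp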
args = ap.parse_args()


if args.text_only and args.vision_only:
    print("--text-only and --vision-only arguments cannot be specified at the same time.")
    exit(1)

if args.use_f32:
    print("WARNING: Weights for the convolution op are always saved in f16, as the convolution op in GGML does not support 32-bit kernel weights yet.")

# output in the same directory as the model if output_dir is None
dir_model = args.model_dir

if (
    args.clip_model_is_vision or
    not os.path.exists(dir_model + "/vocab.json") or
    args.clip_model_is_openclip or
    args.clip_model_is_siglip
):
    vocab = None
    tokens = None
else:
    with open(dir_model + "/vocab.json", "r", encoding="utf-8") as f:
        vocab = json.load(f)
        tokens = [key for key in vocab]

with open(dir_model + "/config.json", "r", encoding="utf-8") as f:
    config = json.load(f)
    if args.clip_model_is_vision:
        v_hparams = config
        t_hparams = None
    else:
        v_hparams = config["vision_config"]
        t_hparams = config["text_config"]

# possible data types
#   ftype == 0 -> float32
#   ftype == 1 -> float16
#
# map from ftype to string
ftype_str = ["f32", "f16"]

ftype = 1
if args.use_f32:
    ftype = 0

if args.clip_model_is_siglip:
    model = SiglipVisionModel.from_pretrained(dir_model)
    processor = None
elif args.clip_model_is_vision or args.clip_model_is_openclip:
    model = CLIPVisionModel.from_pretrained(dir_model)
    processor = None
else:
    model = CLIPModel.from_pretrained(dir_model)
    processor = CLIPProcessor.from_pretrained(dir_model)

fname_middle = None
has_text_encoder = True
has_vision_encoder = True
has_llava_projector = False

if args.text_only:
    fname_middle = "text-"
    has_vision_encoder = False
elif args.llava_projector is not None:
    fname_middle = "mmproj-"
    has_text_encoder = False
    has_llava_projector = True
elif args.vision_only:
    fname_middle = "vision-"
    has_text_encoder = False
else:
    fname_middle = ""

output_dir = args.output_dir if args.output_dir is not None else dir_model
os.makedirs(output_dir, exist_ok=True)
output_prefix = os.path.basename(output_dir).replace("ggml_", "")
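# The output file name encodes the export mode and dtype; for instance, running with
# --llava-projector and the default f16 yields "mmproj-model-f16.gguf".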
fname_out = os.path.join(output_dir, f"{fname_middle}model-{ftype_str[ftype]}.gguf")
fout = GGUFWriter(path=fname_out, arch="clip", endianess=GGUFEndian.LITTLE if not args.bigendian else GGUFEndian.BIG)

fout.add_bool("clip.has_text_encoder", has_text_encoder)
fout.add_bool("clip.has_vision_encoder", has_vision_encoder)
fout.add_bool("clip.has_llava_projector", has_llava_projector)
fout.add_file_type(ftype)
model_name = config["_name_or_path"] if "_name_or_path" in config else os.path.basename(dir_model)
fout.add_name(model_name)
if args.text_only:
    fout.add_description("text-only CLIP model")
elif args.vision_only and not has_llava_projector:
    fout.add_description("vision-only CLIP model")
elif has_llava_projector:
    fout.add_description("image encoder for LLaVA")
    # add projector type
    fout.add_string("clip.projector_type", args.projector_type)
else:
    fout.add_description("two-tower CLIP model")

if has_text_encoder:
    assert t_hparams is not None
    assert tokens is not None
    if args.clip_model_is_siglip:
        text_projection_dim = 0
    else:
        text_projection_dim = t_hparams.get("projection_dim", config["projection_dim"])
    # text_model hparams
    fout.add_uint32(k(KEY_CONTEXT_LENGTH, TEXT), t_hparams["max_position_embeddings"])
    fout.add_uint32(k(KEY_EMBEDDING_LENGTH, TEXT), t_hparams["hidden_size"])
    fout.add_uint32(k(KEY_FEED_FORWARD_LENGTH, TEXT), t_hparams["intermediate_size"])
    fout.add_uint32("clip.text.projection_dim", text_projection_dim)
    fout.add_uint32(k(KEY_ATTENTION_HEAD_COUNT, TEXT), t_hparams["num_attention_heads"])
    fout.add_float32(k(KEY_ATTENTION_LAYERNORM_EPS, TEXT), t_hparams["layer_norm_eps"])
    fout.add_uint32(k(KEY_BLOCK_COUNT, TEXT), t_hparams["num_hidden_layers"])
    fout.add_token_list(tokens)


def get_non_negative_vision_feature_layers(v_hparams):
    """
    Determine the vision feature layer(s) for the llava model, which are indices into the
    hidden states of the visual encoder. Note that the hidden states array generally takes the
    form:
        [<emb input>, <output of enc block 0>, ..., <output of enc block num_hidden_layers - 1>]
    so feature indices should be offset as n+1 to get the output of encoder block n.
    We convert all vision feature layers to non-negative so that -1 can be used in
    the model as an unset value. If no vision feature layer is found, we leave it unset.
    """
    num_hidden_layers = v_hparams["num_hidden_layers"]
    to_non_negative = lambda layer_idx: layer_idx if layer_idx >= 0 else num_hidden_layers + layer_idx + 1
    feature_layers_key = None
    # Key used for llava models in transformers
    if "vision_feature_layer" in config:
        feature_layers_key = "vision_feature_layer"
    # Key used for llava models in the original format
    elif "mm_vision_select_layer" in config:
        feature_layers_key = "mm_vision_select_layer"
    if feature_layers_key is not None:
        feature_layers = config[feature_layers_key]
        if isinstance(feature_layers, int):
            feature_layers = [feature_layers]
        return [to_non_negative(feature_layer) for feature_layer in feature_layers]
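
# For example, with num_hidden_layers == 24, a config value of "mm_vision_select_layer": -2
# maps to 23, i.e. the hidden state holding the output of encoder block 22.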


# Determine if we have explicitly specified vision feature layers in our config
feature_layers = get_non_negative_vision_feature_layers(v_hparams)

if has_vision_encoder:
    # Siglip does not have a visual projector; set projection dim to 0
    if args.clip_model_is_siglip:
        visual_projection_dim = 0
    else:
        visual_projection_dim = v_hparams.get("projection_dim", config["projection_dim"])

    # set vision_model hparams
    fout.add_uint32("clip.vision.image_size", v_hparams["image_size"])
    fout.add_uint32("clip.vision.patch_size", v_hparams["patch_size"])
    fout.add_uint32(k(KEY_EMBEDDING_LENGTH, VISION), v_hparams["hidden_size"])
    fout.add_uint32(k(KEY_FEED_FORWARD_LENGTH, VISION), v_hparams["intermediate_size"])
    fout.add_uint32("clip.vision.projection_dim", visual_projection_dim)
    fout.add_uint32(k(KEY_ATTENTION_HEAD_COUNT, VISION), v_hparams["num_attention_heads"])
    fout.add_float32(k(KEY_ATTENTION_LAYERNORM_EPS, VISION), v_hparams["layer_norm_eps"])
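    # Note: for LLaVA projector models the last encoder block is dropped further below, so the
    # stored block count defaults to num_hidden_layers - 1 unless explicit feature layers were
    # specified, in which case the deepest requested layer is kept.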
    if feature_layers:
        block_count = max(feature_layers)
    else:
        block_count = v_hparams["num_hidden_layers"] - 1 if has_llava_projector else v_hparams["num_hidden_layers"]
    fout.add_uint32(k(KEY_BLOCK_COUNT, VISION), block_count)
    # "image_grid_pinpoints", e.g.:
    #     [[336, 672], [672, 336], [672, 672], [1008, 336], [336, 1008]]
    # is stored flattened:
    #     [336, 672, 672, 336, 672, 672, 1008, 336, 336, 1008]
    if "image_grid_pinpoints" in v_hparams:
        # flatten it
        image_grid_pinpoints = []
        for pinpoint in v_hparams["image_grid_pinpoints"]:
            for p in pinpoint:
                image_grid_pinpoints.append(p)
        fout.add_array("clip.vision.image_grid_pinpoints", image_grid_pinpoints)
    if "image_crop_resolution" in v_hparams:
        fout.add_uint32("clip.vision.image_crop_resolution", v_hparams["image_crop_resolution"])
    if "image_aspect_ratio" in v_hparams:
        fout.add_string("clip.vision.image_aspect_ratio", v_hparams["image_aspect_ratio"])
    if "image_split_resolution" in v_hparams:
        fout.add_uint32("clip.vision.image_split_resolution", v_hparams["image_split_resolution"])
    if "mm_patch_merge_type" in v_hparams:
        fout.add_string("clip.vision.mm_patch_merge_type", v_hparams["mm_patch_merge_type"])
    if "mm_projector_type" in v_hparams:
        fout.add_string("clip.vision.mm_projector_type", v_hparams["mm_projector_type"])
    if feature_layers:
        fout.add_array("clip.vision.feature_layer", feature_layers)

    if processor is not None:
        image_mean = processor.image_processor.image_mean if args.image_mean is None or args.image_mean == default_image_mean else args.image_mean  # pyright: ignore[reportAttributeAccessIssue]
        image_std = processor.image_processor.image_std if args.image_std is None or args.image_std == default_image_std else args.image_std  # pyright: ignore[reportAttributeAccessIssue]
    else:
        image_mean = args.image_mean if args.image_mean is not None else default_image_mean
        image_std = args.image_std if args.image_std is not None else default_image_std
    fout.add_array("clip.vision.image_mean", image_mean)
    fout.add_array("clip.vision.image_std", image_std)

use_gelu = v_hparams["hidden_act"] == "gelu"
fout.add_bool("clip.use_gelu", use_gelu)


if has_llava_projector:
    # By default, we drop the last layer for llava projector
    # models unless we have explicitly set vision feature layers
    if feature_layers is None:
        model.vision_model.encoder.layers.pop(-1)
    else:
        model.vision_model.encoder.layers = model.vision_model.encoder.layers[:max(feature_layers)]

    projector = torch.load(args.llava_projector)
    for name, data in projector.items():
        name = get_tensor_name(name)
        # pw and dw conv ndim==4
        if data.ndim == 2 or data.ndim == 4:
            data = data.squeeze().numpy().astype(np.float16)
        else:
            data = data.squeeze().numpy().astype(np.float32)

        fout.add_tensor(name, data)

    print("Projector tensors added\n")

state_dict = model.state_dict()
for name, data in state_dict.items():
    if should_skip_tensor(name, has_text_encoder, has_vision_encoder, has_llava_projector):
        # we don't need this
        print(f"skipping parameter: {name}")
        continue

    name = get_tensor_name(name)
    data = data.squeeze().numpy()

    n_dims = len(data.shape)

    # ftype == 0 -> float32, ftype == 1 -> float16
    ftype_cur = 0
    if n_dims == 4:
        print(f"tensor {name} is always saved in f16")
        data = data.astype(np.float16)
        ftype_cur = 1
    elif ftype == 1:
        if name[-7:] == ".weight" and n_dims == 2:
            print(" Converting to float16")
            data = data.astype(np.float16)
            ftype_cur = 1
        else:
            print(" Converting to float32")
            data = data.astype(np.float32)
            ftype_cur = 0
    else:
        if data.dtype != np.float32:
            print(" Converting to float32")
            data = data.astype(np.float32)
            ftype_cur = 0

    print(f"{name} - {ftype_str[ftype_cur]} - shape = {data.shape}")
    fout.add_tensor(name, data)

fout.write_header_to_file()
fout.write_kv_data_to_file()
fout.write_tensors_to_file()
fout.close()

print("Done. Output file: " + fname_out)