convert-gptq-to-ggml.py
# Convert a GPTQ quantized LLaMA model to a ggml compatible file
# Based on: https://github.com/qwopqwop200/GPTQ-for-LLaMa
#
import os
import re
import sys
import json
import struct
import numpy as np
import torch
from sentencepiece import SentencePieceProcessor
if len(sys.argv) != 4:
    print("Usage: convert-gptq-to-ggml.py llamaXXb-4bit.pt tokenizer.model out.bin\n")
    sys.exit(1)
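# Example invocation (the 7B checkpoint filename below is just illustrative):
#   python convert-gptq-to-ggml.py llama7b-4bit.pt tokenizer.model out.bin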
fname_model = sys.argv[1]
fname_tokenizer = sys.argv[2]
dir_out = sys.argv[3]

model = torch.load(fname_model, map_location="cpu")
n_vocab, n_embd = model['model.embed_tokens.weight'].shape
n_layer = 1 + max(int(m.group(1)) for name in model
                  if (m := re.match(r'model\.layers\.([0-9]+)', name)))
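# (n_layer is derived from the largest index N found among the checkpoint's
#  'model.layers.N.*' keys, plus one.)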
# hardcoded:
n_mult = 256
n_head = {32: 32, 40: 40, 60: 52, 80: 64}[n_layer]

tokenizer = SentencePieceProcessor(fname_tokenizer)

assert tokenizer.vocab_size() == n_vocab
fname_out = sys.argv[3]
fout = open(fname_out, "wb")

fout.write(struct.pack("i", 0x67676d66)) # magic: ggmf in hex
fout.write(struct.pack("i", 1)) # file version
fout.write(struct.pack("i", n_vocab))
fout.write(struct.pack("i", n_embd))
fout.write(struct.pack("i", n_mult))
fout.write(struct.pack("i", n_head))
fout.write(struct.pack("i", n_layer))
fout.write(struct.pack("i", n_embd // n_head)) # rot (obsolete)
fout.write(struct.pack("i", 4))
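# At this point the header is complete: the 4-byte magic and file version,
# followed by seven more 4-byte ints (n_vocab, n_embd, n_mult, n_head,
# n_layer, rot, and the trailing 4, which records the file's overall
# quantization type for llama.cpp's loader). The vocabulary and tensor data
# written below follow immediately after.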
# This loop unchanged from convert-pth-to-ggml.py:
for i in range(tokenizer.vocab_size()):
    if tokenizer.is_unknown(i):
        text = " \u2047 ".encode()
    elif tokenizer.is_control(i):
        text = b""
    elif tokenizer.is_byte(i):
        piece = tokenizer.id_to_piece(i)
        if len(piece) != 6:
            print(f"Invalid token: {piece}")
            sys.exit(1)
        byte_value = int(piece[3:-1], 16)
        text = struct.pack("B", byte_value)
    else:
        text = tokenizer.id_to_piece(i).replace("\u2581", " ").encode()
    fout.write(struct.pack("i", len(text)))
    fout.write(text)
    fout.write(struct.pack("f", tokenizer.get_score(i)))
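# Each vocabulary entry above is written as a 4-byte length, the raw token
# bytes, and a 4-byte float score taken from the SentencePiece model.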
def write_header(shape, dst_name, ftype_cur):
    sname = dst_name.encode()
    fout.write(struct.pack("iii", len(shape), len(sname), ftype_cur))
    fout.write(struct.pack("i" * len(shape), *shape[::-1]))
    fout.write(sname)

    # ensure tensor data is aligned
    tensor_data_offset = fout.tell()
    tensor_data_offset = (tensor_data_offset + 31) & -32
    fout.seek(tensor_data_offset)
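# Note on write_header: the shape is written reversed because ggml lists
# dimensions innermost-first, and the seek rounds the data offset up to the
# next 32-byte boundary so tensor data stays aligned in the output file.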
def convert_non_q4(src_name, dst_name):
    v = model[src_name]
    shape = v.shape
    print(f"Processing non-Q4 variable: {src_name} with shape: {shape} and type: {v.dtype}")
    if len(shape) == 1:
        print("  Converting to float32")
        v = v.to(torch.float32)

    ftype_cur = {torch.float16: 1, torch.float32: 0}[v.dtype]

    # header
    write_header(shape, dst_name, ftype_cur)

    # data
    v.numpy().tofile(fout)
def convert_q4(src_name, dst_name, permute=False):
    zeros = model[f"{src_name}.zeros"].numpy()
    scales = model[f"{src_name}.scales"].numpy()
    bias = model[f"{src_name}.bias"].numpy()
    qweight = model[f"{src_name}.qweight"].numpy().T # transpose

    # Q4_1 does not support bias; good thing the bias is always all zeros.
    assert not np.any(bias)

    # Each int32 item is actually 8 int4 items packed together, and it's transposed.
    shape = (qweight.shape[0], qweight.shape[1] * 8)

    print(f"Processing Q4 variable: {src_name} with shape: {shape}")

    # The output format has the int4 weights in groups of 32 rather than 8.
    # It looks like this:
    # For each row:
    #   For each group of 32 columns:
    #     - scale (float32, 4 bytes)
    #     - addend (float32, 4 bytes)
    #     - weights (int4 * 32, 16 bytes)
    # Note that in the input, the scales and addends are shared between all
    # the columns in a row, so we end up wasting quite a bit of memory with
    # repeated scales and addends.
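    # So each group of 32 columns takes 4 + 4 + 16 = 24 bytes, i.e. 6 bits
    # per weight on average; a 4096-column row, for example, becomes
    # 4096 / 32 = 128 groups that all repeat the same scale and addend.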
    addends = -zeros # flip sign

    # Since the output format is mixed between integers and floats, we have
    # to hackily view the floats as int32s just so numpy will let us
    # concatenate them.
    addends_view = addends.view(dtype=np.int32)
    scales_view = scales.view(dtype=np.int32)

    # Split into groups of 4 columns (i.e. 32 columns of quantized data):
    grouped = qweight.reshape([qweight.shape[0], qweight.shape[1] // 4, 4])

    # Repeat addends and scales:
    addends_rep = np.atleast_3d(addends_view).repeat(grouped.shape[1], axis=1)
    scales_rep = np.atleast_3d(scales_view).repeat(grouped.shape[1], axis=1)

    blob = np.concatenate([scales_rep, addends_rep, grouped], axis=2, casting='no')

    if permute:
        # Permute some rows to undo the permutation done by convert_llama_weights_to_hf.py.
        # This can be done after the above conversion because it doesn't affect column order/layout.
        blob = (blob.reshape(n_head, 2, shape[0] // n_head // 2, *blob.shape[1:])
                    .swapaxes(1, 2)
                    .reshape(blob.shape))

    # header
    write_header(shape, dst_name, 3) # ftype = Q4_1

    # data
    blob.tofile(fout)
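# For reference: Q4_1 reconstructs each int4 value q in a group as roughly
# scale * q + addend, which is why negating `zeros` above is enough to map
# the GPTQ parameters onto this format.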
convert_non_q4("model.embed_tokens.weight", "tok_embeddings.weight")
convert_non_q4("model.norm.weight", "norm.weight")
convert_non_q4("lm_head.weight", "output.weight")

for i in range(n_layer):
    convert_q4(f"model.layers.{i}.self_attn.q_proj", f"layers.{i}.attention.wq.weight", permute=True)
    convert_q4(f"model.layers.{i}.self_attn.k_proj", f"layers.{i}.attention.wk.weight", permute=True)
    convert_q4(f"model.layers.{i}.self_attn.v_proj", f"layers.{i}.attention.wv.weight")
    convert_q4(f"model.layers.{i}.self_attn.o_proj", f"layers.{i}.attention.wo.weight")
    convert_q4(f"model.layers.{i}.mlp.gate_proj", f"layers.{i}.feed_forward.w1.weight")
    convert_q4(f"model.layers.{i}.mlp.down_proj", f"layers.{i}.feed_forward.w2.weight")
    convert_q4(f"model.layers.{i}.mlp.up_proj", f"layers.{i}.feed_forward.w3.weight")
    convert_non_q4(f"model.layers.{i}.input_layernorm.weight", f"layers.{i}.attention_norm.weight")
    convert_non_q4(f"model.layers.{i}.post_attention_layernorm.weight", f"layers.{i}.ffn_norm.weight")
fout.close()

print(f"Done. Output file: {fname_out}")
print()