Forráskód Böngészése

convert : add support of codeqwen due to tokenizer (#6707)

* add support of codeqwen due to tokenizer

* override load_hparams

* fix typo

* fix load_hparams

* convert : fix whitespace

---------

Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
Junyang Lin 1 éve
szülő
commit
3fec68be4e
1 módosított fájl, 16 hozzáadás és 0 törlés
  1. 16 0
      convert-hf-to-gguf.py

+ 16 - 0
convert-hf-to-gguf.py

@@ -363,6 +363,16 @@ class Model(ABC):
                         scores.append(-1000.0)
                         toktypes.append(SentencePieceTokenTypes.USER_DEFINED)
 
+        if vocab_size > len(tokens):
+            pad_count = vocab_size - len(tokens)
+            print(
+                f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]"
+            )
+            for i in range(1, pad_count + 1):
+                tokens.append(f"[PAD{i}]")
+                scores.append(-1000.0)
+                toktypes.append(SentencePieceTokenTypes.UNUSED)
+
         assert len(tokens) == vocab_size
 
         self.gguf_writer.add_tokenizer_model("llama")
@@ -1789,6 +1799,12 @@ class QwenModel(Model):
class Qwen2Model(Model):
    # Architecture tag written into the GGUF metadata for this converter.
    model_arch = gguf.MODEL_ARCH.QWEN2

    def set_vocab(self):
        """Write the tokenizer vocabulary for Qwen2-family checkpoints.

        Tries the SentencePiece vocab path first and, if no SentencePiece
        tokenizer model file is found (``_set_vocab_sentencepiece`` raises
        ``FileNotFoundError``), falls back to the GPT-2/BPE vocab path.
        NOTE(review): per the commit message this fallback order exists
        because CodeQwen checkpoints ship a SentencePiece tokenizer while
        other Qwen2 checkpoints use a BPE tokenizer — confirm against the
        upstream model repos.
        """
        try:
            self._set_vocab_sentencepiece()
        except FileNotFoundError:
            self._set_vocab_gpt2()
 
 @Model.register("Qwen2MoeForCausalLM")
 class Qwen2MoeModel(Model):