Просмотр исходного кода

convert : add BPE pre-tokenization for DBRX (#7132)

* Add BPE pre-tokenization for DBRX.

* Add vocab GGUFs.

* Remove test.

* Remove GGUFs.
DAN™ 1 год назад
Родитель
Commit
4cd621c26d
4 измененных файла с 9 добавлениями и 0 удалениями
  1. 1 0
      convert-hf-to-gguf-update.py
  2. 3 0
      convert-hf-to-gguf.py
  3. 4 0
      llama.cpp
  4. 1 0
      llama.h

+ 1 - 0
convert-hf-to-gguf-update.py

@@ -68,6 +68,7 @@ models = [
     {"name": "refact",         "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/smallcloudai/Refact-1_6-base", },
     {"name": "command-r",      "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/CohereForAI/c4ai-command-r-v01", },
     {"name": "olmo",           "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/allenai/OLMo-1.7-7B-hf", },
+    {"name": "dbrx",           "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/databricks/dbrx-base", },
 ]
 
 # make directory "models/tokenizers" if it doesn't exist

+ 3 - 0
convert-hf-to-gguf.py

@@ -317,6 +317,9 @@ class Model(ABC):
         if chkhsh == "b6dc8df998e1cfbdc4eac8243701a65afe638679230920b50d6f17d81c098166":
             # ref: https://huggingface.co/allenai/OLMo-1.7-7B-hf
             res = "olmo"
+        if chkhsh == "a8594e3edff7c29c003940395316294b2c623e09894deebbc65f33f1515df79e":
+            # ref: https://huggingface.co/databricks/dbrx-instruct
+            res = "dbrx"
 
         if res is None:
             logger.warning("\n")

+ 4 - 0
llama.cpp

@@ -4394,6 +4394,9 @@ static void llm_load_vocab(
             } else if (
                 tokenizer_pre == "olmo") {
                 vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_OLMO;
+            } else if (
+                tokenizer_pre == "dbrx") {
+                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DBRX;
             } else {
                 throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str()));
             }
@@ -12200,6 +12203,7 @@ struct llm_tokenizer_bpe {
             case LLAMA_VOCAB_TYPE_BPE:
                 switch (vocab.type_pre) {
                     case LLAMA_VOCAB_PRE_TYPE_LLAMA3:
+                    case LLAMA_VOCAB_PRE_TYPE_DBRX:
                         word_collection = unicode_regex_split(text, {
                             // original regex from tokenizer.json
                             //"(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",

+ 1 - 0
llama.h

@@ -82,6 +82,7 @@ extern "C" {
         LLAMA_VOCAB_PRE_TYPE_REFACT         = 8,
         LLAMA_VOCAB_PRE_TYPE_COMMAND_R      = 9,
         LLAMA_VOCAB_PRE_TYPE_OLMO           = 10,
+        LLAMA_VOCAB_PRE_TYPE_DBRX           = 11,
     };
 
     // note: these values should be synchronized with ggml_rope