|
@@ -373,6 +373,29 @@ class Model:
|
|
|
except KeyError:
|
|
except KeyError:
|
|
|
raise NotImplementedError(f'Architecture {arch!r} not supported!') from None
|
|
raise NotImplementedError(f'Architecture {arch!r} not supported!') from None
|
|
|
|
|
|
|
|
|
|
+ def does_token_look_special(self, token: str | bytes) -> bool:
|
|
|
|
|
+ if isinstance(token, (bytes, bytearray)):
|
|
|
|
|
+ token_text = token.decode(encoding="utf-8")
|
|
|
|
|
+ elif isinstance(token, memoryview):
|
|
|
|
|
+ token_text = token.tobytes().decode(encoding="utf-8")
|
|
|
|
|
+ else:
|
|
|
|
|
+ token_text = token
|
|
|
|
|
+
|
|
|
|
|
+ # Some models mark some added tokens which ought to be control tokens as not special.
|
|
|
|
|
+ # (e.g. command-r, command-r-plus, deepseek-coder, gemma{,-2})
|
|
|
|
|
+ seems_special = token_text in (
|
|
|
|
|
+ "<pad>", # deepseek-coder
|
|
|
|
|
+ "<mask>", "<2mass>", "[@BOS@]", # gemma{,-2}
|
|
|
|
|
+ )
|
|
|
|
|
+
|
|
|
|
|
+ seems_special = seems_special or (token_text.startswith("<|") and token_text.endswith("|>"))
|
|
|
|
|
+ seems_special = seems_special or (token_text.startswith("<｜") and token_text.endswith("｜>")) # deepseek-coder
|
|
|
|
|
+
|
|
|
|
|
+ # TODO: should these be marked as UNUSED instead? (maybe not)
|
|
|
|
|
+ seems_special = seems_special or (token_text.startswith("<unused") and token_text.endswith(">")) # gemma{,-2}
|
|
|
|
|
+
|
|
|
|
|
+ return seems_special
|
|
|
|
|
+
|
|
|
# used for GPT-2 BPE and WordPiece vocabs
|
|
# used for GPT-2 BPE and WordPiece vocabs
|
|
|
def get_vocab_base(self) -> tuple[list[str], list[int], str]:
|
|
def get_vocab_base(self) -> tuple[list[str], list[int], str]:
|
|
|
tokens: list[str] = []
|
|
tokens: list[str] = []
|
|
@@ -391,16 +414,18 @@ class Model:
|
|
|
for i in range(vocab_size):
|
|
for i in range(vocab_size):
|
|
|
if i not in reverse_vocab:
|
|
if i not in reverse_vocab:
|
|
|
tokens.append(f"[PAD{i}]")
|
|
tokens.append(f"[PAD{i}]")
|
|
|
- toktypes.append(gguf.TokenType.USER_DEFINED)
|
|
|
|
|
- elif reverse_vocab[i] in added_vocab:
|
|
|
|
|
- tokens.append(reverse_vocab[i])
|
|
|
|
|
- if tokenizer.added_tokens_decoder[i].special:
|
|
|
|
|
- toktypes.append(gguf.TokenType.CONTROL)
|
|
|
|
|
- else:
|
|
|
|
|
- toktypes.append(gguf.TokenType.USER_DEFINED)
|
|
|
|
|
|
|
+ toktypes.append(gguf.TokenType.UNUSED)
|
|
|
else:
|
|
else:
|
|
|
- tokens.append(reverse_vocab[i])
|
|
|
|
|
- toktypes.append(gguf.TokenType.NORMAL)
|
|
|
|
|
|
|
+ token: str = reverse_vocab[i]
|
|
|
|
|
+ if token in added_vocab:
|
|
|
|
|
+ if tokenizer.added_tokens_decoder[i].special or self.does_token_look_special(token):
|
|
|
|
|
+ toktypes.append(gguf.TokenType.CONTROL)
|
|
|
|
|
+ else:
|
|
|
|
|
+ token = token.replace(b"\xe2\x96\x81".decode("utf-8"), " ") # pre-normalize user-defined spaces
|
|
|
|
|
+ toktypes.append(gguf.TokenType.USER_DEFINED)
|
|
|
|
|
+ else:
|
|
|
|
|
+ toktypes.append(gguf.TokenType.NORMAL)
|
|
|
|
|
+ tokens.append(token)
|
|
|
|
|
|
|
|
return tokens, toktypes, tokpre
|
|
return tokens, toktypes, tokpre
|
|
|
|
|
|
|
@@ -559,7 +584,7 @@ class Model:
|
|
|
for i in range(vocab_size):
|
|
for i in range(vocab_size):
|
|
|
if i not in reverse_vocab:
|
|
if i not in reverse_vocab:
|
|
|
tokens.append(f"[PAD{i}]")
|
|
tokens.append(f"[PAD{i}]")
|
|
|
- toktypes.append(gguf.TokenType.USER_DEFINED)
|
|
|
|
|
|
|
+ toktypes.append(gguf.TokenType.UNUSED)
|
|
|
elif reverse_vocab[i] in added_vocab:
|
|
elif reverse_vocab[i] in added_vocab:
|
|
|
tokens.append(reverse_vocab[i])
|
|
tokens.append(reverse_vocab[i])
|
|
|
toktypes.append(gguf.TokenType.CONTROL)
|
|
toktypes.append(gguf.TokenType.CONTROL)
|
|
@@ -609,7 +634,7 @@ class Model:
|
|
|
|
|
|
|
|
tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
|
|
tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
|
|
|
scores: list[float] = [-10000.0] * vocab_size
|
|
scores: list[float] = [-10000.0] * vocab_size
|
|
|
- toktypes: list[int] = [SentencePieceTokenTypes.UNKNOWN] * vocab_size
|
|
|
|
|
|
|
+ toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size
|
|
|
|
|
|
|
|
for token_id in range(tokenizer.vocab_size()):
|
|
for token_id in range(tokenizer.vocab_size()):
|
|
|
piece = tokenizer.IdToPiece(token_id)
|
|
piece = tokenizer.IdToPiece(token_id)
|
|
@@ -644,6 +669,25 @@ class Model:
|
|
|
scores[token_id] = -1000.0
|
|
scores[token_id] = -1000.0
|
|
|
toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
|
|
toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
|
|
|
|
|
|
|
|
|
|
+ tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
|
|
|
|
|
+ if tokenizer_config_file.is_file():
|
|
|
|
|
+ with open(tokenizer_config_file, "r", encoding="utf-8") as f:
|
|
|
|
|
+ tokenizer_config_json = json.load(f)
|
|
|
|
|
+ added_tokens_decoder = tokenizer_config_json.get("added_tokens_decoder", {})
|
|
|
|
|
+ for token_id, token_data in added_tokens_decoder.items():
|
|
|
|
|
+ token_id = int(token_id)
|
|
|
|
|
+ token: str = token_data["content"]
|
|
|
|
|
+ if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
|
|
|
|
|
+ assert tokens[token_id] == token.encode("utf-8")
|
|
|
|
|
+ if token_data.get("special") or self.does_token_look_special(token):
|
|
|
|
|
+ toktypes[token_id] = SentencePieceTokenTypes.CONTROL
|
|
|
|
|
+ else:
|
|
|
|
|
+ token = token.replace(b"\xe2\x96\x81".decode("utf-8"), " ") # pre-normalize user-defined spaces
|
|
|
|
|
+ toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
|
|
|
|
|
+
|
|
|
|
|
+ scores[token_id] = -1000.0
|
|
|
|
|
+ tokens[token_id] = token.encode("utf-8")
|
|
|
|
|
+
|
|
|
if vocab_size > len(tokens):
|
|
if vocab_size > len(tokens):
|
|
|
pad_count = vocab_size - len(tokens)
|
|
pad_count = vocab_size - len(tokens)
|
|
|
logger.debug(f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]")
|
|
logger.debug(f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]")
|
|
@@ -1266,7 +1310,7 @@ class StableLMModel(Model):
|
|
|
if (self.dir_model / "tokenizer.json").is_file():
|
|
if (self.dir_model / "tokenizer.json").is_file():
|
|
|
self._set_vocab_gpt2()
|
|
self._set_vocab_gpt2()
|
|
|
else:
|
|
else:
|
|
|
- # StableLM 2 1.6B uses a vocab in a similar format to Qwen's vocab
|
|
|
|
|
|
|
+ # StableLM 2 1.6B used to have a vocab in a similar format to Qwen's vocab
|
|
|
self._set_vocab_qwen()
|
|
self._set_vocab_qwen()
|
|
|
|
|
|
|
|
def set_gguf_parameters(self):
|
|
def set_gguf_parameters(self):
|
|
@@ -1578,7 +1622,6 @@ class DbrxModel(Model):
|
|
|
self.gguf_writer.add_rope_freq_base(attn_config["rope_theta"])
|
|
self.gguf_writer.add_rope_freq_base(attn_config["rope_theta"])
|
|
|
|
|
|
|
|
self.gguf_writer.add_clamp_kqv(attn_config["clip_qkv"])
|
|
self.gguf_writer.add_clamp_kqv(attn_config["clip_qkv"])
|
|
|
- self.gguf_writer.add_file_type(self.ftype)
|
|
|
|
|
|
|
|
|
|
self.gguf_writer.add_expert_count(ffn_config["moe_num_experts"])
|
|
self.gguf_writer.add_expert_count(ffn_config["moe_num_experts"])
|
|
|
self.gguf_writer.add_expert_used_count(ffn_config["moe_top_k"])
|
|
self.gguf_writer.add_expert_used_count(ffn_config["moe_top_k"])
|
|
@@ -1872,7 +1915,7 @@ class Phi3MiniModel(Model):
|
|
|
|
|
|
|
|
tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
|
|
tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
|
|
|
scores: list[float] = [-10000.0] * vocab_size
|
|
scores: list[float] = [-10000.0] * vocab_size
|
|
|
- toktypes: list[int] = [SentencePieceTokenTypes.UNKNOWN] * vocab_size
|
|
|
|
|
|
|
+ toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size
|
|
|
|
|
|
|
|
for token_id in range(tokenizer.vocab_size()):
|
|
for token_id in range(tokenizer.vocab_size()):
|
|
|
|
|
|
|
@@ -1917,7 +1960,7 @@ class Phi3MiniModel(Model):
|
|
|
for token_id, foken_data in added_tokens_decoder.items():
|
|
for token_id, foken_data in added_tokens_decoder.items():
|
|
|
token_id = int(token_id)
|
|
token_id = int(token_id)
|
|
|
token = foken_data["content"].encode("utf-8")
|
|
token = foken_data["content"].encode("utf-8")
|
|
|
- if toktypes[token_id] != SentencePieceTokenTypes.UNKNOWN:
|
|
|
|
|
|
|
+ if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
|
|
|
assert tokens[token_id] == token
|
|
assert tokens[token_id] == token
|
|
|
tokens[token_id] = token
|
|
tokens[token_id] = token
|
|
|
scores[token_id] = -1000.0
|
|
scores[token_id] = -1000.0
|
|
@@ -1933,7 +1976,7 @@ class Phi3MiniModel(Model):
|
|
|
for foken_data in added_tokens:
|
|
for foken_data in added_tokens:
|
|
|
token_id = int(foken_data["id"])
|
|
token_id = int(foken_data["id"])
|
|
|
token = foken_data["content"].encode("utf-8")
|
|
token = foken_data["content"].encode("utf-8")
|
|
|
- if toktypes[token_id] != SentencePieceTokenTypes.UNKNOWN:
|
|
|
|
|
|
|
+ if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
|
|
|
assert tokens[token_id] == token
|
|
assert tokens[token_id] == token
|
|
|
tokens[token_id] = token
|
|
tokens[token_id] = token
|
|
|
scores[token_id] = -1000.0
|
|
scores[token_id] = -1000.0
|
|
@@ -2145,7 +2188,7 @@ class InternLM2Model(Model):
|
|
|
toktype = SentencePieceTokenTypes.BYTE
|
|
toktype = SentencePieceTokenTypes.BYTE
|
|
|
# take care of ununsed raw token
|
|
# take care of ununsed raw token
|
|
|
if piece.startswith('[UNUSED'):
|
|
if piece.startswith('[UNUSED'):
|
|
|
- toktype = SentencePieceTokenTypes.UNKNOWN
|
|
|
|
|
|
|
+ toktype = SentencePieceTokenTypes.UNUSED
|
|
|
|
|
|
|
|
tokens.append(text)
|
|
tokens.append(text)
|
|
|
scores.append(score)
|
|
scores.append(score)
|
|
@@ -2175,7 +2218,7 @@ class InternLM2Model(Model):
|
|
|
if token == chat_eos_token:
|
|
if token == chat_eos_token:
|
|
|
chat_eos_token_id = token_id
|
|
chat_eos_token_id = token_id
|
|
|
token = token.encode("utf-8")
|
|
token = token.encode("utf-8")
|
|
|
- if toktypes[token_id] != SentencePieceTokenTypes.UNKNOWN:
|
|
|
|
|
|
|
+ if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
|
|
|
assert(tokens[token_id] == token)
|
|
assert(tokens[token_id] == token)
|
|
|
tokens[token_id] = token
|
|
tokens[token_id] = token
|
|
|
scores[token_id] = -1000.0
|
|
scores[token_id] = -1000.0
|
|
@@ -2194,7 +2237,7 @@ class InternLM2Model(Model):
|
|
|
if token == chat_eos_token:
|
|
if token == chat_eos_token:
|
|
|
chat_eos_token_id = token_id
|
|
chat_eos_token_id = token_id
|
|
|
token = token.encode("utf-8")
|
|
token = token.encode("utf-8")
|
|
|
- if toktypes[token_id] != SentencePieceTokenTypes.UNKNOWN:
|
|
|
|
|
|
|
+ if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
|
|
|
assert(tokens[token_id] == token)
|
|
assert(tokens[token_id] == token)
|
|
|
tokens[token_id] = token
|
|
tokens[token_id] = token
|
|
|
scores[token_id] = -1000.0
|
|
scores[token_id] = -1000.0
|
|
@@ -2434,19 +2477,7 @@ class Gemma2Model(Model):
|
|
|
model_arch = gguf.MODEL_ARCH.GEMMA2
|
|
model_arch = gguf.MODEL_ARCH.GEMMA2
|
|
|
|
|
|
|
|
def set_vocab(self):
|
|
def set_vocab(self):
|
|
|
- tokens, scores, toktypes = self._create_vocab_sentencepiece()
|
|
|
|
|
- # hack: This is required so that we can properly use start/end-of-turn for chat template
|
|
|
|
|
- for i in range(108):
|
|
|
|
|
- # including <unusedX>, <start_of_turn>, <end_of_turn>
|
|
|
|
|
- toktypes[i] = SentencePieceTokenTypes.CONTROL
|
|
|
|
|
- self.gguf_writer.add_tokenizer_model("llama")
|
|
|
|
|
- self.gguf_writer.add_tokenizer_pre("default")
|
|
|
|
|
- self.gguf_writer.add_token_list(tokens)
|
|
|
|
|
- self.gguf_writer.add_token_scores(scores)
|
|
|
|
|
- self.gguf_writer.add_token_types(toktypes)
|
|
|
|
|
-
|
|
|
|
|
- special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
|
|
|
|
|
- special_vocab.add_to_gguf(self.gguf_writer)
|
|
|
|
|
|
|
+ self._set_vocab_sentencepiece()
|
|
|
|
|
|
|
|
self.gguf_writer.add_add_space_prefix(False)
|
|
self.gguf_writer.add_add_space_prefix(False)
|
|
|
|
|
|
|
@@ -2770,7 +2801,7 @@ class ArcticModel(Model):
|
|
|
|
|
|
|
|
tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
|
|
tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
|
|
|
scores: list[float] = [-10000.0] * vocab_size
|
|
scores: list[float] = [-10000.0] * vocab_size
|
|
|
- toktypes: list[int] = [SentencePieceTokenTypes.UNKNOWN] * vocab_size
|
|
|
|
|
|
|
+ toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size
|
|
|
|
|
|
|
|
for token_id in range(tokenizer.vocab_size()):
|
|
for token_id in range(tokenizer.vocab_size()):
|
|
|
|
|
|
|
@@ -3025,7 +3056,7 @@ class T5Model(Model):
|
|
|
|
|
|
|
|
tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
|
|
tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
|
|
|
scores: list[float] = [-10000.0] * vocab_size
|
|
scores: list[float] = [-10000.0] * vocab_size
|
|
|
- toktypes: list[int] = [SentencePieceTokenTypes.UNKNOWN] * vocab_size
|
|
|
|
|
|
|
+ toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size
|
|
|
|
|
|
|
|
for token_id in range(tokenizer.vocab_size()):
|
|
for token_id in range(tokenizer.vocab_size()):
|
|
|
piece = tokenizer.IdToPiece(token_id)
|
|
piece = tokenizer.IdToPiece(token_id)
|
|
@@ -3243,15 +3274,14 @@ class ChatGLMModel(Model):
|
|
|
if len(piece) != 0 and token_id < tokenizer.tokenizer.sp_model.vocab_size():
|
|
if len(piece) != 0 and token_id < tokenizer.tokenizer.sp_model.vocab_size():
|
|
|
score = tokenizer.tokenizer.sp_model.get_score(token_id)
|
|
score = tokenizer.tokenizer.sp_model.get_score(token_id)
|
|
|
|
|
|
|
|
- if len(piece) == 0:
|
|
|
|
|
- text = f"[PAD{token_id}]".encode("utf-8")
|
|
|
|
|
-
|
|
|
|
|
if token_id >= tokenizer.tokenizer.sp_model.vocab_size():
|
|
if token_id >= tokenizer.tokenizer.sp_model.vocab_size():
|
|
|
if piece in special_tokens:
|
|
if piece in special_tokens:
|
|
|
- # show special tokens in prompt
|
|
|
|
|
- toktype = SentencePieceTokenTypes.USER_DEFINED
|
|
|
|
|
|
|
+ toktype = SentencePieceTokenTypes.CONTROL
|
|
|
|
|
+ elif len(piece) == 0:
|
|
|
|
|
+ text = f"[PAD{token_id}]".encode("utf-8")
|
|
|
|
|
+ toktype = SentencePieceTokenTypes.UNUSED
|
|
|
else:
|
|
else:
|
|
|
- toktype = SentencePieceTokenTypes.UNKNOWN
|
|
|
|
|
|
|
+ toktype = SentencePieceTokenTypes.USER_DEFINED
|
|
|
tokens.append(text)
|
|
tokens.append(text)
|
|
|
scores.append(score)
|
|
scores.append(score)
|
|
|
toktypes.append(toktype)
|
|
toktypes.append(toktype)
|
|
@@ -3340,7 +3370,7 @@ class ChatGLMModel(Model):
|
|
|
for i in range(vocab_size):
|
|
for i in range(vocab_size):
|
|
|
if i not in reverse_vocab:
|
|
if i not in reverse_vocab:
|
|
|
tokens.append(f"[PAD{i}]")
|
|
tokens.append(f"[PAD{i}]")
|
|
|
- toktypes.append(gguf.TokenType.USER_DEFINED)
|
|
|
|
|
|
|
+ toktypes.append(gguf.TokenType.UNUSED)
|
|
|
elif reverse_vocab[i] in added_vocab:
|
|
elif reverse_vocab[i] in added_vocab:
|
|
|
tokens.append(reverse_vocab[i])
|
|
tokens.append(reverse_vocab[i])
|
|
|
if tokenizer.added_tokens_decoder[i].special:
|
|
if tokenizer.added_tokens_decoder[i].special:
|