@@ -2200,6 +2200,15 @@ class Phi3MiniModel(Model):
     model_arch = gguf.MODEL_ARCH.PHI3

     def set_vocab(self):
+        # Phi-4 model uses GPT2Tokenizer
+        tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
+        if tokenizer_config_file.is_file():
+            with open(tokenizer_config_file, "r", encoding="utf-8") as f:
+                tokenizer_config_json = json.load(f)
+                tokenizer_class = tokenizer_config_json['tokenizer_class']
+                if tokenizer_class == 'GPT2Tokenizer':
+                    return self._set_vocab_gpt2()
+
         from sentencepiece import SentencePieceProcessor

         tokenizer_path = self.dir_model / 'tokenizer.model'
@@ -2316,7 +2325,11 @@ class Phi3MiniModel(Model):
         self.gguf_writer.add_rope_dimension_count(rope_dims)
         self.gguf_writer.add_rope_freq_base(self.find_hparam(["rope_theta"]))
         self.gguf_writer.add_file_type(self.ftype)
-        self.gguf_writer.add_sliding_window(self.find_hparam(["sliding_window"]))
+        sliding_window = self.hparams.get("sliding_window")
+        # use zero value of sliding_window to distinguish Phi-4 from other PHI3 models
+        if sliding_window is None:
+            sliding_window = 0
+        self.gguf_writer.add_sliding_window(sliding_window)

     def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
         n_embd = self.find_hparam(["hidden_size", "n_embd"])