@@ -2458,6 +2458,12 @@ class GemmaModel(Model):
         tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
 
         for name, data_torch in self.get_tensors():
+            # lm_head is not used in llama.cpp, while autoawq will include this tensor in the model.
+            # To prevent errors, skip loading lm_head.weight.
+            if name == "lm_head.weight":
+                print(f"Skipping get tensor {name!r} in safetensors so that convert can end normally.")
+                continue
+
             old_dtype = data_torch.dtype
 
             # convert any unsupported data types to float32
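
For context, here is a minimal standalone sketch of how one can check whether a checkpoint actually carries the extra `lm_head.weight` tensor that autoawq emits and this change skips. It is an illustration, not part of the conversion script: the checkpoint path is hypothetical, and it assumes the `safetensors` package is installed.

```python
# Minimal sketch (not part of convert-hf-to-gguf.py): list the tensors in a
# safetensors checkpoint and flag the lm_head.weight entry that autoawq
# exports. The file path below is a hypothetical example.
from safetensors import safe_open

with safe_open("model.safetensors", framework="pt") as ckpt:
    for name in ckpt.keys():
        marker = "  <- skipped by the converter" if name == "lm_head.weight" else ""
        print(f"{name}{marker}")
```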