Browse Source

Remove .attention from skipped tensors to match more accurately (#7051)

Bartowski 1 year ago
parent
commit
60325fa56f
1 changed file with 1 addition and 1 deletion
  1. 1 1
      convert-hf-to-gguf.py

+ 1 - 1
convert-hf-to-gguf.py

@@ -1427,7 +1427,7 @@ class LlamaModel(Model):
         experts = dict()
         for name, data_torch in self.get_tensors():
             # we don't need these
-            if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq")):
+            if name.endswith((".attention.masked_bias", ".attention.bias", ".rotary_emb.inv_freq")):
                 continue
 
             old_dtype = data_torch.dtype