convert : add custom attention mapping

Georgi Gerganov, 1 year ago
commit c5ede3849f

1 changed file with 2 additions and 0 deletions:
    gguf-py/gguf/tensor_mapping.py

gguf-py/gguf/tensor_mapping.py  +2 -0

@@ -146,6 +146,7 @@ class TensorNameMap:
         # Attention query
         MODEL_TENSOR.ATTN_Q: (
             "model.layers.{bid}.self_attn.q_proj",                       # llama-hf nemotron olmoe olmo2
+            "model.layers.{bid}.self_attn.q_proj_no_perm",               # llama-custom
             "layers.{bid}.attention.wq",                                 # llama-pth
             "encoder.layer.{bid}.attention.self.query",                  # bert
             "transformer.h.{bid}.attn.q_proj",                           # gpt-j
@@ -158,6 +159,7 @@ class TensorNameMap:
         # Attention key
         MODEL_TENSOR.ATTN_K: (
             "model.layers.{bid}.self_attn.k_proj",                     # llama-hf nemotron olmoe olmo2
+            "model.layers.{bid}.self_attn.k_proj_no_perm",             # llama-custom
             "layers.{bid}.attention.wk",                               # llama-pth
             "encoder.layer.{bid}.attention.self.key",                  # bert
             "transformer.h.{bid}.attn.k_proj",                         # gpt-j