|
@@ -146,6 +146,7 @@ class TensorNameMap:
         # Attention query
         MODEL_TENSOR.ATTN_Q: (
             "model.layers.{bid}.self_attn.q_proj",          # llama-hf nemotron olmoe olmo2
+            "model.layers.{bid}.self_attn.q_proj_no_perm",  # llama-custom
             "layers.{bid}.attention.wq",                    # llama-pth
             "encoder.layer.{bid}.attention.self.query",     # bert
             "transformer.h.{bid}.attn.q_proj",              # gpt-j
@@ -158,6 +159,7 @@ class TensorNameMap:
         # Attention key
         MODEL_TENSOR.ATTN_K: (
             "model.layers.{bid}.self_attn.k_proj",          # llama-hf nemotron olmoe olmo2
+            "model.layers.{bid}.self_attn.k_proj_no_perm",  # llama-custom
             "layers.{bid}.attention.wk",                    # llama-pth
             "encoder.layer.{bid}.attention.self.key",       # bert
             "transformer.h.{bid}.attn.k_proj",              # gpt-j
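For context, each entry's `{bid}` placeholder is expanded per block index, and the table is inverted so a checkpoint tensor name can be looked up to find its GGUF tensor type; adding the `*_no_perm` variants makes the `llama-custom` checkpoint names resolve to the same types as the stock HF names. A minimal, self-contained sketch of that resolution (simplified: `ATTN_Q` and `build_mapping` are illustrative stand-ins, not the real gguf-py API):

```python
# Minimal sketch of TensorNameMap-style lookup (simplified; the real
# class lives in gguf-py/gguf/tensor_mapping.py).
ATTN_Q = "attn_q"  # stand-in for MODEL_TENSOR.ATTN_Q

mappings_cfg = {
    ATTN_Q: (
        "model.layers.{bid}.self_attn.q_proj",          # llama-hf
        "model.layers.{bid}.self_attn.q_proj_no_perm",  # llama-custom (added above)
        "layers.{bid}.attention.wq",                    # llama-pth
    ),
}

def build_mapping(n_blocks: int) -> dict[str, str]:
    """Expand the {bid} placeholder for every block index and invert the
    table so a checkpoint tensor name maps to its GGUF tensor type."""
    mapping: dict[str, str] = {}
    for tensor_type, names in mappings_cfg.items():
        for bid in range(n_blocks):
            for name in names:
                mapping[name.format(bid=bid)] = tensor_type
    return mapping

mapping = build_mapping(n_blocks=2)
# Both the stock and the new no-perm checkpoint names resolve to ATTN_Q:
assert mapping["model.layers.0.self_attn.q_proj"] == ATTN_Q
assert mapping["model.layers.0.self_attn.q_proj_no_perm"] == ATTN_Q
```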