# tensor_mapping.py
from __future__ import annotations

from typing import Sequence

from .constants import MODEL_ARCH, MODEL_TENSOR, MODEL_TENSORS, TENSOR_NAMES


class TensorNameMap:
    mappings_cfg: dict[MODEL_TENSOR, tuple[str, ...]] = {
        # Token embeddings
        MODEL_TENSOR.TOKEN_EMBD: (
            "gpt_neox.embed_in",  # gptneox
            "transformer.wte",  # gpt2 gpt-j mpt refact qwen dbrx jais exaone
            "transformer.word_embeddings",  # falcon
            "word_embeddings",  # bloom
            "model.embed_tokens",  # llama-hf nemotron
            "tok_embeddings",  # llama-pth
            "embeddings.word_embeddings",  # bert nomic-bert
            "language_model.embedding.word_embeddings",  # persimmon
            "wte",  # gpt2
            "transformer.embd.wte",  # phi2
            "model.tok_embeddings",  # internlm2
            "model.embedding",  # mamba-qbert
            "backbone.embedding",  # mamba
            "backbone.embeddings",  # mamba-hf
            "transformer.in_out_embed",  # Grok
            "embedding.word_embeddings",  # chatglm
            "transformer.token_embeddings",  # openelm
            "shared",  # t5
        ),

        # Token type embeddings
        MODEL_TENSOR.TOKEN_TYPES: (
            "embeddings.token_type_embeddings",  # bert nomic-bert
        ),

        # Normalization of token embeddings
        MODEL_TENSOR.TOKEN_EMBD_NORM: (
            "word_embeddings_layernorm",  # bloom
            "embeddings.LayerNorm",  # bert
            "emb_ln",  # nomic-bert
            "transformer.norm",  # openelm
        ),

        # Position embeddings
        MODEL_TENSOR.POS_EMBD: (
            "transformer.wpe",  # gpt2
            "embeddings.position_embeddings",  # bert
            "wpe",  # gpt2
        ),

        # Output
        MODEL_TENSOR.OUTPUT: (
            "embed_out",  # gptneox
            "lm_head",  # gpt2 mpt falcon llama-hf baichuan qwen mamba dbrx jais nemotron exaone
            "output",  # llama-pth bloom internlm2
            "word_embeddings_for_head",  # persimmon
            "lm_head.linear",  # phi2
            "output_layer",  # chatglm
        ),

        # Output norm
        MODEL_TENSOR.OUTPUT_NORM: (
            "gpt_neox.final_layer_norm",  # gptneox
            "transformer.ln_f",  # gpt2 gpt-j falcon jais exaone
            "model.norm",  # llama-hf baichuan internlm2
            "norm",  # llama-pth
            "transformer.norm_f",  # mpt dbrx
            "ln_f",  # refact bloom qwen gpt2
            "language_model.encoder.final_layernorm",  # persimmon
            "model.final_layernorm",  # persimmon
            "lm_head.ln",  # phi2
            "model.norm_f",  # mamba-qbert
            "backbone.norm_f",  # mamba
            "transformer.rms_norm",  # Grok
            "encoder.final_layernorm",  # chatglm
            "transformer.norm",  # openelm
            "model.norm",  # nemotron
        ),

        # Rope frequencies
        MODEL_TENSOR.ROPE_FREQS: (
            "rope.freqs",  # llama-pth
            "rotary_pos_emb.inv_freq",  # chatglm
        ),
    }
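
    # How these tables are consumed (see __init__ below): every alias listed
    # above, plus the canonical GGUF name itself, becomes a key of
    # self.mapping pointing at (tensor, canonical_name). An illustrative
    # sketch, assuming the standard TENSOR_NAMES ("token_embd", ...):
    #
    #     "token_embd"         -> (MODEL_TENSOR.TOKEN_EMBD, "token_embd")
    #     "model.embed_tokens" -> (MODEL_TENSOR.TOKEN_EMBD, "token_embd")
    #
    # The block tables below work the same way, except that each name may
    # contain "{bid}", which is expanded with every block id at init time.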

    block_mappings_cfg: dict[MODEL_TENSOR, tuple[str, ...]] = {
        # Attention norm
        MODEL_TENSOR.ATTN_NORM: (
            "gpt_neox.layers.{bid}.input_layernorm",  # gptneox
            "transformer.h.{bid}.ln_1",  # gpt2 gpt-j refact qwen jais exaone
            "transformer.blocks.{bid}.norm_1",  # mpt
            "transformer.h.{bid}.input_layernorm",  # falcon7b
            "h.{bid}.input_layernorm",  # bloom
            "transformer.h.{bid}.ln_mlp",  # falcon40b
            "model.layers.{bid}.input_layernorm",  # llama-hf nemotron
            "layers.{bid}.attention_norm",  # llama-pth
            "language_model.encoder.layers.{bid}.input_layernorm",  # persimmon
            "model.layers.{bid}.ln1",  # yi
            "h.{bid}.ln_1",  # gpt2
            "transformer.h.{bid}.ln",  # phi2
            "model.layers.layers.{bid}.norm",  # plamo
            "model.layers.{bid}.attention_norm",  # internlm2
            "model.layers.{bid}.norm",  # mamba-qbert
            "backbone.layers.{bid}.norm",  # mamba
            "transformer.decoder_layer.{bid}.rms_norm",  # Grok
            "transformer.blocks.{bid}.norm_attn_norm.norm_1",  # dbrx
            "encoder.layers.{bid}.input_layernorm",  # chatglm
            "transformer.layers.{bid}.attn_norm",  # openelm
        ),

        # Attention norm 2
        MODEL_TENSOR.ATTN_NORM_2: (
            "transformer.h.{bid}.ln_attn",  # falcon40b
            "encoder.layer.{bid}.layer_norm_1",  # jina-v2-code
        ),

        # Attention query-key-value
        MODEL_TENSOR.ATTN_QKV: (
            "gpt_neox.layers.{bid}.attention.query_key_value",  # gptneox
            "transformer.h.{bid}.attn.c_attn",  # gpt2 qwen jais
            "transformer.blocks.{bid}.attn.Wqkv",  # mpt
            "transformer.blocks.{bid}.norm_attn_norm.attn.Wqkv",  # dbrx
            "transformer.h.{bid}.self_attention.query_key_value",  # falcon
            "h.{bid}.self_attention.query_key_value",  # bloom
            "language_model.encoder.layers.{bid}.self_attention.query_key_value",  # persimmon
            "model.layers.{bid}.self_attn.query_key_value",  # persimmon
            "h.{bid}.attn.c_attn",  # gpt2
            "transformer.h.{bid}.mixer.Wqkv",  # phi2
            "encoder.layers.{bid}.attn.Wqkv",  # nomic-bert
            "model.layers.{bid}.self_attn.qkv_proj",  # phi3
            "encoder.layers.{bid}.self_attention.query_key_value",  # chatglm
            "transformer.layers.{bid}.attn.qkv_proj",  # openelm
        ),

        # Attention query
        MODEL_TENSOR.ATTN_Q: (
            "model.layers.{bid}.self_attn.q_proj",  # llama-hf nemotron
            "layers.{bid}.attention.wq",  # llama-pth
            "encoder.layer.{bid}.attention.self.query",  # bert
            "transformer.h.{bid}.attn.q_proj",  # gpt-j
            "model.layers.layers.{bid}.self_attn.q_proj",  # plamo
            "model.layers.{bid}.attention.wq",  # internlm2
            "transformer.decoder_layer.{bid}.multi_head_attention.query",  # Grok
            "transformer.h.{bid}.attn.attention.q_proj",  # exaone
        ),

        # Attention key
        MODEL_TENSOR.ATTN_K: (
            "model.layers.{bid}.self_attn.k_proj",  # llama-hf nemotron
            "layers.{bid}.attention.wk",  # llama-pth
            "encoder.layer.{bid}.attention.self.key",  # bert
            "transformer.h.{bid}.attn.k_proj",  # gpt-j
            "transformer.h.{bid}.attn.k",  # refact
            "model.layers.layers.{bid}.self_attn.k_proj",  # plamo
            "model.layers.{bid}.attention.wk",  # internlm2
            "transformer.decoder_layer.{bid}.multi_head_attention.key",  # Grok
            "transformer.h.{bid}.attn.attention.k_proj",  # exaone
        ),

        # Attention value
        MODEL_TENSOR.ATTN_V: (
            "model.layers.{bid}.self_attn.v_proj",  # llama-hf nemotron
            "layers.{bid}.attention.wv",  # llama-pth
            "encoder.layer.{bid}.attention.self.value",  # bert
            "transformer.h.{bid}.attn.v_proj",  # gpt-j
            "transformer.h.{bid}.attn.v",  # refact
            "model.layers.layers.{bid}.self_attn.v_proj",  # plamo
            "model.layers.{bid}.attention.wv",  # internlm2
            "transformer.decoder_layer.{bid}.multi_head_attention.value",  # Grok
            "transformer.h.{bid}.attn.attention.v_proj",  # exaone
        ),

        # Attention output
        MODEL_TENSOR.ATTN_OUT: (
            "gpt_neox.layers.{bid}.attention.dense",  # gptneox
            "transformer.h.{bid}.attn.c_proj",  # gpt2 refact qwen jais
            "transformer.blocks.{bid}.attn.out_proj",  # mpt
            "transformer.h.{bid}.self_attention.dense",  # falcon
            "h.{bid}.self_attention.dense",  # bloom
            "model.layers.{bid}.self_attn.o_proj",  # llama-hf nemotron
            "layers.{bid}.attention.wo",  # llama-pth
            "encoder.layer.{bid}.attention.output.dense",  # bert
            "transformer.h.{bid}.attn.out_proj",  # gpt-j
            "language_model.encoder.layers.{bid}.self_attention.dense",  # persimmon
            "model.layers.{bid}.self_attn.dense",  # persimmon
            "h.{bid}.attn.c_proj",  # gpt2
            "transformer.h.{bid}.mixer.out_proj",  # phi2
            "model.layers.layers.{bid}.self_attn.o_proj",  # plamo
            "model.layers.{bid}.attention.wo",  # internlm2
            "encoder.layers.{bid}.attn.out_proj",  # nomic-bert
            "transformer.decoder_layer.{bid}.multi_head_attention.linear",  # Grok
            "transformer.blocks.{bid}.norm_attn_norm.attn.out_proj",  # dbrx
            "encoder.layers.{bid}.self_attention.dense",  # chatglm
            "transformer.layers.{bid}.attn.out_proj",  # openelm
            "transformer.h.{bid}.attn.attention.out_proj",  # exaone
        ),

        # Attention output norm
        MODEL_TENSOR.ATTN_OUT_NORM: (
            "encoder.layer.{bid}.attention.output.LayerNorm",  # bert
            "encoder.layers.{bid}.norm1",  # nomic-bert
            "transformer.decoder_layer.{bid}.rms_norm_1",  # Grok
            "transformer.blocks.{bid}.norm_attn_norm.norm_2",  # dbrx
        ),

        MODEL_TENSOR.ATTN_POST_NORM: (
            "model.layers.{bid}.post_attention_layernorm",  # gemma2
        ),

        # Rotary embeddings
        MODEL_TENSOR.ATTN_ROT_EMBD: (
            "model.layers.{bid}.self_attn.rotary_emb.inv_freq",  # llama-hf
            "layers.{bid}.attention.inner_attention.rope.freqs",  # llama-pth
            "model.layers.layers.{bid}.self_attn.rotary_emb.inv_freq",  # plamo
            "transformer.h.{bid}.attn.rotary_emb.inv_freq",  # codeshell
        ),

        # Feed-forward norm
        MODEL_TENSOR.FFN_NORM: (
            "gpt_neox.layers.{bid}.post_attention_layernorm",  # gptneox
            "transformer.h.{bid}.ln_2",  # gpt2 refact qwen jais exaone
            "h.{bid}.post_attention_layernorm",  # bloom
            "transformer.blocks.{bid}.norm_2",  # mpt
            "model.layers.{bid}.post_attention_layernorm",  # llama-hf nemotron
            "layers.{bid}.ffn_norm",  # llama-pth
            "language_model.encoder.layers.{bid}.post_attention_layernorm",  # persimmon
            "model.layers.{bid}.ln2",  # yi
            "h.{bid}.ln_2",  # gpt2
            "model.layers.{bid}.ffn_norm",  # internlm2
            "transformer.decoder_layer.{bid}.rms_norm_2",  # Grok
            "encoder.layers.{bid}.post_attention_layernorm",  # chatglm
            "transformer.layers.{bid}.ffn_norm",  # openelm
        ),

        # Pre feed-forward norm
        MODEL_TENSOR.FFN_PRE_NORM: (
            "model.layers.{bid}.pre_feedforward_layernorm",  # gemma2
        ),

        # Post feed-forward norm
        MODEL_TENSOR.FFN_POST_NORM: (
            "model.layers.{bid}.post_feedforward_layernorm",  # gemma2
        ),

        MODEL_TENSOR.FFN_GATE_INP: (
            "layers.{bid}.feed_forward.gate",  # mixtral
            "model.layers.{bid}.block_sparse_moe.gate",  # mixtral
            "model.layers.{bid}.mlp.gate",  # qwen2moe
            "transformer.decoder_layer.{bid}.router",  # Grok
            "transformer.blocks.{bid}.ffn.router.layer",  # dbrx
        ),

        MODEL_TENSOR.FFN_GATE_INP_SHEXP: (
            "model.layers.{bid}.mlp.shared_expert_gate",  # qwen2moe
        ),

        # Feed-forward up
        MODEL_TENSOR.FFN_UP: (
            "gpt_neox.layers.{bid}.mlp.dense_h_to_4h",  # gptneox
            "transformer.h.{bid}.mlp.c_fc",  # gpt2 jais
            "transformer.blocks.{bid}.ffn.up_proj",  # mpt
            "transformer.h.{bid}.mlp.dense_h_to_4h",  # falcon
            "h.{bid}.mlp.dense_h_to_4h",  # bloom
            "model.layers.{bid}.mlp.up_proj",  # llama-hf refact nemotron
            "layers.{bid}.feed_forward.w3",  # llama-pth
            "encoder.layer.{bid}.intermediate.dense",  # bert
            "transformer.h.{bid}.mlp.fc_in",  # gpt-j
            "transformer.h.{bid}.mlp.linear_3",  # refact
            "language_model.encoder.layers.{bid}.mlp.dense_h_to_4h",  # persimmon
            "model.layers.{bid}.mlp.dense_h_to_4h",  # persimmon
            "transformer.h.{bid}.mlp.w1",  # qwen
            "h.{bid}.mlp.c_fc",  # gpt2
            "transformer.h.{bid}.mlp.fc1",  # phi2
            "model.layers.{bid}.mlp.fc1",  # phi2
            "model.layers.{bid}.mlp.gate_up_proj",  # phi3
            "model.layers.layers.{bid}.mlp.up_proj",  # plamo
            "model.layers.{bid}.feed_forward.w3",  # internlm2
            "encoder.layers.{bid}.mlp.fc11",  # nomic-bert
            "model.layers.{bid}.mlp.c_fc",  # starcoder2
            "encoder.layer.{bid}.mlp.gated_layers_v",  # jina-bert-v2
            "model.layers.{bid}.residual_mlp.w3",  # arctic
            "encoder.layers.{bid}.mlp.dense_h_to_4h",  # chatglm
            "transformer.h.{bid}.mlp.c_fc_1",  # exaone
        ),

        MODEL_TENSOR.FFN_UP_EXP: (
            "layers.{bid}.feed_forward.experts.w3",  # mixtral (merged)
            "transformer.decoder_layer.{bid}.moe.linear_v",  # Grok (merged)
            "transformer.blocks.{bid}.ffn.experts.mlp.v1",  # dbrx
            "model.layers.{bid}.mlp.experts.up_proj",  # qwen2moe (merged)
        ),

        MODEL_TENSOR.FFN_UP_SHEXP: (
            "model.layers.{bid}.mlp.shared_expert.up_proj",  # qwen2moe
            "model.layers.{bid}.mlp.shared_experts.up_proj",  # deepseek2
        ),

        # AWQ-activation gate
        MODEL_TENSOR.FFN_ACT: (
            "transformer.blocks.{bid}.ffn.act",  # mpt
        ),

        # Feed-forward gate
        MODEL_TENSOR.FFN_GATE: (
            "model.layers.{bid}.mlp.gate_proj",  # llama-hf refact
            "layers.{bid}.feed_forward.w1",  # llama-pth
            "transformer.h.{bid}.mlp.w2",  # qwen
            "transformer.h.{bid}.mlp.c_fc2",  # jais
            "model.layers.layers.{bid}.mlp.gate_proj",  # plamo
            "model.layers.{bid}.feed_forward.w1",  # internlm2
            "encoder.layers.{bid}.mlp.fc12",  # nomic-bert
            "encoder.layer.{bid}.mlp.gated_layers_w",  # jina-bert-v2
            "transformer.h.{bid}.mlp.linear_1",  # refact
            "model.layers.{bid}.residual_mlp.w1",  # arctic
            "transformer.h.{bid}.mlp.c_fc_0",  # exaone
        ),

        MODEL_TENSOR.FFN_GATE_EXP: (
            "layers.{bid}.feed_forward.experts.w1",  # mixtral (merged)
            "transformer.decoder_layer.{bid}.moe.linear",  # Grok (merged)
            "transformer.blocks.{bid}.ffn.experts.mlp.w1",  # dbrx
            "model.layers.{bid}.mlp.experts.gate_proj",  # qwen2moe (merged)
        ),

        MODEL_TENSOR.FFN_GATE_SHEXP: (
            "model.layers.{bid}.mlp.shared_expert.gate_proj",  # qwen2moe
            "model.layers.{bid}.mlp.shared_experts.gate_proj",  # deepseek2
        ),

        # Feed-forward down
        MODEL_TENSOR.FFN_DOWN: (
            "gpt_neox.layers.{bid}.mlp.dense_4h_to_h",  # gptneox
            "transformer.h.{bid}.mlp.c_proj",  # gpt2 refact qwen jais
            "transformer.blocks.{bid}.ffn.down_proj",  # mpt
            "transformer.h.{bid}.mlp.dense_4h_to_h",  # falcon
            "h.{bid}.mlp.dense_4h_to_h",  # bloom
            "model.layers.{bid}.mlp.down_proj",  # llama-hf nemotron
            "layers.{bid}.feed_forward.w2",  # llama-pth
            "encoder.layer.{bid}.output.dense",  # bert
            "transformer.h.{bid}.mlp.fc_out",  # gpt-j
            "language_model.encoder.layers.{bid}.mlp.dense_4h_to_h",  # persimmon
            "model.layers.{bid}.mlp.dense_4h_to_h",  # persimmon
            "h.{bid}.mlp.c_proj",  # gpt2
            "transformer.h.{bid}.mlp.fc2",  # phi2
            "model.layers.{bid}.mlp.fc2",  # phi2
            "model.layers.layers.{bid}.mlp.down_proj",  # plamo
            "model.layers.{bid}.feed_forward.w2",  # internlm2
            "encoder.layers.{bid}.mlp.fc2",  # nomic-bert
            "model.layers.{bid}.mlp.c_proj",  # starcoder2
            "encoder.layer.{bid}.mlp.wo",  # jina-bert-v2
            "transformer.layers.{bid}.ffn.proj_2",  # openelm
            "model.layers.{bid}.residual_mlp.w2",  # arctic
            "encoder.layer.{bid}.mlp.down_layer",  # jina-bert-v2
            "encoder.layers.{bid}.mlp.dense_4h_to_h",  # chatglm
            "model.layers.h.{bid}.mlp.c_proj",  # exaone
        ),

        MODEL_TENSOR.FFN_DOWN_EXP: (
            "layers.{bid}.feed_forward.experts.w2",  # mixtral (merged)
            "transformer.decoder_layer.{bid}.moe.linear_1",  # Grok (merged)
            "transformer.blocks.{bid}.ffn.experts.mlp.w2",  # dbrx
            "model.layers.{bid}.mlp.experts.down_proj",  # qwen2moe (merged)
        ),

        MODEL_TENSOR.FFN_DOWN_SHEXP: (
            "model.layers.{bid}.mlp.shared_expert.down_proj",  # qwen2moe
            "model.layers.{bid}.mlp.shared_experts.down_proj",  # deepseek2
        ),

        MODEL_TENSOR.ATTN_Q_NORM: (
            "language_model.encoder.layers.{bid}.self_attention.q_layernorm",
            "model.layers.{bid}.self_attn.q_layernorm",  # persimmon
            "model.layers.{bid}.self_attn.q_norm",  # cohere
            "transformer.blocks.{bid}.attn.q_ln",  # sea-lion
            "encoder.layer.{bid}.attention.self.layer_norm_q",  # jina-bert-v2
            "transformer.layers.{bid}.attn.q_norm",  # openelm
        ),

        MODEL_TENSOR.ATTN_K_NORM: (
            "language_model.encoder.layers.{bid}.self_attention.k_layernorm",
            "model.layers.{bid}.self_attn.k_layernorm",  # persimmon
            "model.layers.{bid}.self_attn.k_norm",  # cohere
            "transformer.blocks.{bid}.attn.k_ln",  # sea-lion
            "encoder.layer.{bid}.attention.self.layer_norm_k",  # jina-bert-v2
            "transformer.layers.{bid}.attn.k_norm",  # openelm
        ),

        MODEL_TENSOR.ROPE_FREQS: (
            "language_model.encoder.layers.{bid}.self_attention.rotary_emb.inv_freq",  # persimmon
        ),

        MODEL_TENSOR.LAYER_OUT_NORM: (
            "encoder.layer.{bid}.output.LayerNorm",  # bert
            "encoder.layers.{bid}.norm2",  # nomic-bert
            "transformer.decoder_layer.{bid}.rms_norm_3",  # Grok
            "encoder.layer.{bid}.mlp.layernorm",  # jina-bert-v2
            "encoder.layer.{bid}.layer_norm_2",  # jina-v2-code
        ),

        MODEL_TENSOR.SSM_IN: (
            "model.layers.{bid}.in_proj",
            "backbone.layers.{bid}.mixer.in_proj",
        ),

        MODEL_TENSOR.SSM_CONV1D: (
            "model.layers.{bid}.conv1d",
            "backbone.layers.{bid}.mixer.conv1d",
        ),

        MODEL_TENSOR.SSM_X: (
            "model.layers.{bid}.x_proj",
            "backbone.layers.{bid}.mixer.x_proj",
        ),

        MODEL_TENSOR.SSM_DT: (
            "model.layers.{bid}.dt_proj",
            "backbone.layers.{bid}.mixer.dt_proj",
        ),

        MODEL_TENSOR.SSM_A: (
            "model.layers.{bid}.A_log",
            "backbone.layers.{bid}.mixer.A_log",
        ),

        MODEL_TENSOR.SSM_D: (
            "model.layers.{bid}.D",
            "backbone.layers.{bid}.mixer.D",
        ),

        MODEL_TENSOR.SSM_OUT: (
            "model.layers.{bid}.out_proj",
            "backbone.layers.{bid}.mixer.out_proj",
        ),

        MODEL_TENSOR.ATTN_Q_A: (
            "model.layers.{bid}.self_attn.q_a_proj",  # deepseek2
        ),

        MODEL_TENSOR.ATTN_Q_B: (
            "model.layers.{bid}.self_attn.q_b_proj",  # deepseek2
        ),

        MODEL_TENSOR.ATTN_KV_A_MQA: (
            "model.layers.{bid}.self_attn.kv_a_proj_with_mqa",  # deepseek2
        ),

        MODEL_TENSOR.ATTN_KV_B: (
            "model.layers.{bid}.self_attn.kv_b_proj",  # deepseek2
        ),

        MODEL_TENSOR.ATTN_Q_A_NORM: (
            "model.layers.{bid}.self_attn.q_a_layernorm",  # deepseek2
        ),

        MODEL_TENSOR.ATTN_KV_A_NORM: (
            "model.layers.{bid}.self_attn.kv_a_layernorm",  # deepseek2
        ),

        MODEL_TENSOR.ATTN_SUB_NORM: (
            "model.layers.{bid}.self_attn.inner_attn_ln",  # bitnet
        ),

        MODEL_TENSOR.FFN_SUB_NORM: (
            "model.layers.{bid}.mlp.ffn_layernorm",  # bitnet
        ),

        MODEL_TENSOR.DEC_ATTN_NORM: (
            "decoder.block.{bid}.layer.0.layer_norm",  # t5
        ),

        MODEL_TENSOR.DEC_ATTN_Q: (
            "decoder.block.{bid}.layer.0.SelfAttention.q",  # t5
        ),

        MODEL_TENSOR.DEC_ATTN_K: (
            "decoder.block.{bid}.layer.0.SelfAttention.k",  # t5
        ),

        MODEL_TENSOR.DEC_ATTN_V: (
            "decoder.block.{bid}.layer.0.SelfAttention.v",  # t5
        ),

        MODEL_TENSOR.DEC_ATTN_OUT: (
            "decoder.block.{bid}.layer.0.SelfAttention.o",  # t5
        ),

        MODEL_TENSOR.DEC_ATTN_REL_B: (
            "decoder.block.{bid}.layer.0.SelfAttention.relative_attention_bias",  # t5
        ),

        MODEL_TENSOR.DEC_CROSS_ATTN_NORM: (
            "decoder.block.{bid}.layer.1.layer_norm",  # t5
        ),

        MODEL_TENSOR.DEC_CROSS_ATTN_Q: (
            "decoder.block.{bid}.layer.1.EncDecAttention.q",  # t5
        ),

        MODEL_TENSOR.DEC_CROSS_ATTN_K: (
            "decoder.block.{bid}.layer.1.EncDecAttention.k",  # t5
        ),

        MODEL_TENSOR.DEC_CROSS_ATTN_V: (
            "decoder.block.{bid}.layer.1.EncDecAttention.v",  # t5
        ),

        MODEL_TENSOR.DEC_CROSS_ATTN_OUT: (
            "decoder.block.{bid}.layer.1.EncDecAttention.o",  # t5
        ),

        MODEL_TENSOR.DEC_CROSS_ATTN_REL_B: (
            "decoder.block.{bid}.layer.1.EncDecAttention.relative_attention_bias",  # t5
        ),

        MODEL_TENSOR.DEC_FFN_NORM: (
            "decoder.block.{bid}.layer.2.layer_norm",  # t5
        ),

        MODEL_TENSOR.DEC_FFN_GATE: (
            "decoder.block.{bid}.layer.2.DenseReluDense.wi_0",  # flan-t5
        ),

        MODEL_TENSOR.DEC_FFN_UP: (
            "decoder.block.{bid}.layer.2.DenseReluDense.wi",  # t5
            "decoder.block.{bid}.layer.2.DenseReluDense.wi_1",  # flan-t5
        ),

        MODEL_TENSOR.DEC_FFN_DOWN: (
            "decoder.block.{bid}.layer.2.DenseReluDense.wo",  # t5
        ),

        MODEL_TENSOR.DEC_OUTPUT_NORM: (
            "decoder.final_layer_norm",  # t5
        ),

        MODEL_TENSOR.ENC_ATTN_NORM: (
            "encoder.block.{bid}.layer.0.layer_norm",  # t5
        ),

        MODEL_TENSOR.ENC_ATTN_Q: (
            "encoder.block.{bid}.layer.0.SelfAttention.q",  # t5
        ),

        MODEL_TENSOR.ENC_ATTN_K: (
            "encoder.block.{bid}.layer.0.SelfAttention.k",  # t5
        ),

        MODEL_TENSOR.ENC_ATTN_V: (
            "encoder.block.{bid}.layer.0.SelfAttention.v",  # t5
        ),

        MODEL_TENSOR.ENC_ATTN_OUT: (
            "encoder.block.{bid}.layer.0.SelfAttention.o",  # t5
        ),

        MODEL_TENSOR.ENC_ATTN_REL_B: (
            "encoder.block.{bid}.layer.0.SelfAttention.relative_attention_bias",  # t5
        ),

        MODEL_TENSOR.ENC_FFN_NORM: (
            "encoder.block.{bid}.layer.1.layer_norm",  # t5
        ),

        MODEL_TENSOR.ENC_FFN_GATE: (
            "encoder.block.{bid}.layer.1.DenseReluDense.wi_0",  # flan-t5
        ),

        MODEL_TENSOR.ENC_FFN_UP: (
            "encoder.block.{bid}.layer.1.DenseReluDense.wi",  # t5
            "encoder.block.{bid}.layer.1.DenseReluDense.wi_1",  # flan-t5
        ),

        MODEL_TENSOR.ENC_FFN_DOWN: (
            "encoder.block.{bid}.layer.1.DenseReluDense.wo",  # t5
        ),

        MODEL_TENSOR.ENC_OUTPUT_NORM: (
            "encoder.final_layer_norm",  # t5
        ),
    }

    # Architecture-specific block mappings
    arch_block_mappings_cfg: dict[MODEL_ARCH, dict[MODEL_TENSOR, tuple[str, ...]]] = {
        MODEL_ARCH.ARCTIC: {
            MODEL_TENSOR.FFN_NORM: (
                "model.layers.{bid}.residual_layernorm",
            ),
            MODEL_TENSOR.FFN_NORM_EXP: (
                "model.layers.{bid}.post_attention_layernorm",
            ),
        },
    }
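
    # For architectures listed here, these entries are merged over
    # block_mappings_cfg at init time. For MODEL_ARCH.ARCTIC, for example,
    # the generic FFN_NORM aliases above are replaced, so the checkpoint key
    # "model.layers.{bid}.post_attention_layernorm" resolves to FFN_NORM_EXP
    # rather than FFN_NORM.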

    mapping: dict[str, tuple[MODEL_TENSOR, str]]

    def __init__(self, arch: MODEL_ARCH, n_blocks: int):
        self.mapping = {}
        for tensor, keys in self.mappings_cfg.items():
            if tensor not in MODEL_TENSORS[arch]:
                continue
            tensor_name = TENSOR_NAMES[tensor]
            self.mapping[tensor_name] = (tensor, tensor_name)
            for key in keys:
                self.mapping[key] = (tensor, tensor_name)
        # Apply architecture-specific overrides to a copy, so the class-level
        # block_mappings_cfg shared by all instances is never mutated.
        block_mappings = dict(self.block_mappings_cfg)
        if arch in self.arch_block_mappings_cfg:
            block_mappings.update(self.arch_block_mappings_cfg[arch])
        for bid in range(n_blocks):
            for tensor, keys in block_mappings.items():
                if tensor not in MODEL_TENSORS[arch]:
                    continue
                tensor_name = TENSOR_NAMES[tensor].format(bid = bid)
                self.mapping[tensor_name] = (tensor, tensor_name)
                for key in keys:
                    key = key.format(bid = bid)
                    self.mapping[key] = (tensor, tensor_name)

    def get_type_and_name(self, key: str, try_suffixes: Sequence[str] = ()) -> tuple[MODEL_TENSOR, str] | None:
        # Try an exact match first; otherwise retry with each suffix stripped
        # from the key and re-appended to the mapped name.
        result = self.mapping.get(key)
        if result is not None:
            return result
        for suffix in try_suffixes:
            if key.endswith(suffix):
                result = self.mapping.get(key[:-len(suffix)])
                if result is not None:
                    return result[0], result[1] + suffix
        return None
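
    # Illustrative suffix fallback: the mapping stores bare tensor names, so a
    # checkpoint key such as "model.norm.weight" only resolves when ".weight"
    # is listed in try_suffixes. With the standard TENSOR_NAMES:
    #
    #     tmap.get_type_and_name("model.norm.weight", try_suffixes=(".weight",))
    #     # -> (MODEL_TENSOR.OUTPUT_NORM, "output_norm.weight")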

    def get_name(self, key: str, try_suffixes: Sequence[str] = ()) -> str | None:
        result = self.get_type_and_name(key, try_suffixes = try_suffixes)
        if result is None:
            return None
        return result[1]

    def get_type(self, key: str, try_suffixes: Sequence[str] = ()) -> MODEL_TENSOR | None:
        result = self.get_type_and_name(key, try_suffixes = try_suffixes)
        if result is None:
            return None
        return result[0]

    def __getitem__(self, key: str) -> str:
        try:
            return self.mapping[key][1]
        except KeyError:
            raise KeyError(key)

    def __contains__(self, key: str) -> bool:
        return key in self.mapping

    def __repr__(self) -> str:
        return repr(self.mapping)


def get_tensor_name_map(arch: MODEL_ARCH, n_blocks: int) -> TensorNameMap:
    return TensorNameMap(arch, n_blocks)
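

if __name__ == "__main__":
    # Minimal usage sketch. This assumes the module lives inside a package
    # (the relative import above), so run it as e.g. `python -m gguf.tensor_mapping`.
    # The expected outputs in the comments assume the standard GGUF
    # TENSOR_NAMES ("token_embd", "blk.{bid}.attn_q", ...) and are
    # illustrative, not asserted by this module.
    tmap = get_tensor_name_map(MODEL_ARCH.LLAMA, n_blocks=2)

    # Non-block tensor: the stored key has no suffix, so ".weight" is
    # stripped for the lookup and re-appended to the canonical name.
    print(tmap.get_name("model.embed_tokens.weight", try_suffixes=(".weight", ".bias")))
    # expected: token_embd.weight

    # Per-block tensor: "{bid}" was expanded for every block id at init time.
    print(tmap.get_type_and_name("model.layers.0.self_attn.q_proj.weight",
                                 try_suffixes=(".weight", ".bias")))
    # expected: (MODEL_TENSOR.ATTN_Q, "blk.0.attn_q.weight")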