tensor_mapping.py

from __future__ import annotations

from typing import Sequence

from .constants import MODEL_ARCH, MODEL_TENSOR, MODEL_TENSORS, TENSOR_NAMES


class TensorNameMap:
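    """Map checkpoint tensor names onto the standardized GGUF tensor names.

    Each entry lists the names a tensor goes by in upstream checkpoints
    (HF Transformers layouts and original releases); the trailing comments
    note which architectures use each source name. The GGUF-side names are
    taken from TENSOR_NAMES in .constants when an instance is built.
    """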
    mappings_cfg: dict[MODEL_TENSOR, tuple[str, ...]] = {
        # Token embeddings
        MODEL_TENSOR.TOKEN_EMBD: (
            "gpt_neox.embed_in",  # gptneox
            "transformer.wte",  # gpt2 gpt-j mpt refact qwen dbrx jais exaone
            "transformer.word_embeddings",  # falcon
            "word_embeddings",  # bloom
            "model.embed_tokens",  # llama-hf nemotron olmoe olmo2 rwkv6qwen2 glm4-0414
            "tok_embeddings",  # llama-pth
            "embeddings.word_embeddings",  # bert nomic-bert
            "language_model.embedding.word_embeddings",  # persimmon
            "wte",  # gpt2
            "transformer.embd.wte",  # phi2
            "model.tok_embeddings",  # internlm2
            "model.embedding",  # mamba-qbert
            "backbone.embedding",  # mamba
            "backbone.embeddings",  # mamba-hf
            "transformer.in_out_embed",  # Grok
            "embedding.word_embeddings",  # chatglm
            "transformer.token_embeddings",  # openelm
            "shared",  # t5
            "rwkv.embeddings",  # rwkv6
            "model.embeddings",  # rwkv7
            "model.word_embeddings",  # bailingmoe
            "language_model.model.embed_tokens",  # llama4
        ),
        # Token type embeddings
        MODEL_TENSOR.TOKEN_TYPES: (
            "embeddings.token_type_embeddings",  # bert nomic-bert
        ),
        # Normalization of token embeddings
        MODEL_TENSOR.TOKEN_EMBD_NORM: (
            "word_embeddings_layernorm",  # bloom
            "embeddings.LayerNorm",  # bert
            "emb_ln",  # nomic-bert
            "transformer.norm",  # openelm
            "rwkv.blocks.0.pre_ln",  # rwkv6
            "model.pre_ln",  # rwkv7
            "model.layers.0.pre_norm",  # rwkv7
            "backbone.norm",  # wavtokenizer
        ),
        # Position embeddings
        MODEL_TENSOR.POS_EMBD: (
            "transformer.wpe",  # gpt2
            "embeddings.position_embeddings",  # bert
            "wpe",  # gpt2
        ),
        # Output
        MODEL_TENSOR.OUTPUT: (
            "embed_out",  # gptneox
            "lm_head",  # gpt2 mpt falcon llama-hf baichuan qwen mamba dbrx jais nemotron exaone olmoe olmo2 phimoe
            "output",  # llama-pth bloom internlm2
            "word_embeddings_for_head",  # persimmon
            "lm_head.linear",  # phi2
            "output_layer",  # chatglm
            "head",  # rwkv
            "head.out",  # wavtokenizer
            "language_model.lm_head",  # llama4
        ),
        # Output norm
        MODEL_TENSOR.OUTPUT_NORM: (
            "gpt_neox.final_layer_norm",  # gptneox
            "transformer.ln_f",  # gpt2 gpt-j falcon jais exaone
            "model.norm",  # llama-hf baichuan internlm2 olmoe olmo2 phimoe
            "norm",  # llama-pth
            "transformer.norm_f",  # mpt dbrx
            "ln_f",  # refact bloom qwen gpt2
            "language_model.encoder.final_layernorm",  # persimmon
            "model.final_layernorm",  # persimmon
            "lm_head.ln",  # phi2
            "model.norm_f",  # mamba-qbert
            "backbone.norm_f",  # mamba
            "transformer.rms_norm",  # Grok
            "encoder.final_layernorm",  # chatglm
            "transformer.norm",  # openelm
            "model.norm",  # nemotron
            "rwkv.ln_out",  # rwkv6
            "model.ln_out",  # rwkv7
            "backbone.final_layer_norm",  # wavtokenizer
            "language_model.model.norm",  # llama4
        ),
        # Rope frequencies
        MODEL_TENSOR.ROPE_FREQS: (
            "rope.freqs",  # llama-pth
            "rotary_pos_emb.inv_freq",  # chatglm
        ),
        MODEL_TENSOR.ROPE_FACTORS_LONG: (),
        MODEL_TENSOR.ROPE_FACTORS_SHORT: (),
        MODEL_TENSOR.CONV1D: (
            "backbone.embed",  # roberta
        ),
    }
    block_mappings_cfg: dict[MODEL_TENSOR, tuple[str, ...]] = {
        # Attention norm
        MODEL_TENSOR.ATTN_NORM: (
            "gpt_neox.layers.{bid}.input_layernorm",  # gptneox
            "transformer.h.{bid}.ln_1",  # gpt2 gpt-j refact qwen jais exaone
            "transformer.blocks.{bid}.norm_1",  # mpt
            "transformer.h.{bid}.input_layernorm",  # falcon7b
            "h.{bid}.input_layernorm",  # bloom
            "transformer.h.{bid}.ln_mlp",  # falcon40b
            "model.layers.{bid}.input_layernorm",  # llama-hf nemotron olmoe phimoe
            "layers.{bid}.attention_norm",  # llama-pth
            "language_model.encoder.layers.{bid}.input_layernorm",  # persimmon
            "model.layers.{bid}.ln1",  # yi
            "h.{bid}.ln_1",  # gpt2
            "transformer.h.{bid}.ln",  # phi2
            "model.layers.layers.{bid}.norm",  # plamo
            "model.layers.{bid}.attention_norm",  # internlm2
            "model.layers.{bid}.norm",  # mamba-qbert
            "backbone.layers.{bid}.norm",  # mamba
            "transformer.decoder_layer.{bid}.rms_norm",  # Grok
            "transformer.blocks.{bid}.norm_attn_norm.norm_1",  # dbrx
            "encoder.layers.{bid}.input_layernorm",  # chatglm
            "transformer.layers.{bid}.attn_norm",  # openelm
            "rwkv.blocks.{bid}.ln1",  # rwkv6
            "model.layers.{bid}.ln1",  # rwkv7
            "language_model.model.layers.{bid}.input_layernorm",  # llama4
        ),
        # Attention norm 2
        MODEL_TENSOR.ATTN_NORM_2: (
            "transformer.h.{bid}.ln_attn",  # falcon40b
            "encoder.layer.{bid}.layer_norm_1",  # jina-v2-code
            "rwkv.blocks.{bid}.ln2",  # rwkv6
            "model.layers.{bid}.ln2",  # rwkv7
        ),
        # Attention query-key-value
        MODEL_TENSOR.ATTN_QKV: (
            "gpt_neox.layers.{bid}.attention.query_key_value",  # gptneox
            "transformer.h.{bid}.attn.c_attn",  # gpt2 qwen jais
            "transformer.blocks.{bid}.attn.Wqkv",  # mpt
            "transformer.blocks.{bid}.norm_attn_norm.attn.Wqkv",  # dbrx
            "transformer.h.{bid}.self_attention.query_key_value",  # falcon
            "h.{bid}.self_attention.query_key_value",  # bloom
            "language_model.encoder.layers.{bid}.self_attention.query_key_value",  # persimmon
            "model.layers.{bid}.self_attn.query_key_value",  # persimmon
            "h.{bid}.attn.c_attn",  # gpt2
            "transformer.h.{bid}.mixer.Wqkv",  # phi2
            "encoder.layers.{bid}.attn.Wqkv",  # nomic-bert
            "model.layers.{bid}.self_attn.qkv_proj",  # phi3
            "encoder.layers.{bid}.self_attention.query_key_value",  # chatglm
            "transformer.layers.{bid}.attn.qkv_proj",  # openelm
        ),
        # Attention query
        MODEL_TENSOR.ATTN_Q: (
            "model.layers.{bid}.self_attn.q_proj",  # llama-hf nemotron olmoe olmo2 phimoe
            "model.layers.{bid}.self_attn.q_proj_no_perm",  # llama-custom
            "layers.{bid}.attention.wq",  # llama-pth
            "encoder.layer.{bid}.attention.self.query",  # bert
            "transformer.h.{bid}.attn.q_proj",  # gpt-j
            "model.layers.layers.{bid}.self_attn.q_proj",  # plamo
            "model.layers.{bid}.attention.wq",  # internlm2
            "transformer.decoder_layer.{bid}.multi_head_attention.query",  # Grok
            "transformer.h.{bid}.attn.attention.q_proj",  # exaone
            "language_model.model.layers.{bid}.self_attn.q_proj",  # llama4
        ),
        # Attention key
        MODEL_TENSOR.ATTN_K: (
            "model.layers.{bid}.self_attn.k_proj",  # llama-hf nemotron olmoe olmo2 phimoe
            "model.layers.{bid}.self_attn.k_proj_no_perm",  # llama-custom
            "layers.{bid}.attention.wk",  # llama-pth
            "encoder.layer.{bid}.attention.self.key",  # bert
            "transformer.h.{bid}.attn.k_proj",  # gpt-j
            "transformer.h.{bid}.attn.k",  # refact
            "model.layers.layers.{bid}.self_attn.k_proj",  # plamo
            "model.layers.{bid}.attention.wk",  # internlm2
            "transformer.decoder_layer.{bid}.multi_head_attention.key",  # Grok
            "transformer.h.{bid}.attn.attention.k_proj",  # exaone
            "language_model.model.layers.{bid}.self_attn.k_proj",  # llama4
        ),
        # Attention value
        MODEL_TENSOR.ATTN_V: (
            "model.layers.{bid}.self_attn.v_proj",  # llama-hf nemotron olmoe olmo2 phimoe
            "layers.{bid}.attention.wv",  # llama-pth
            "encoder.layer.{bid}.attention.self.value",  # bert
            "transformer.h.{bid}.attn.v_proj",  # gpt-j
            "transformer.h.{bid}.attn.v",  # refact
            "model.layers.layers.{bid}.self_attn.v_proj",  # plamo
            "model.layers.{bid}.attention.wv",  # internlm2
            "transformer.decoder_layer.{bid}.multi_head_attention.value",  # Grok
            "transformer.h.{bid}.attn.attention.v_proj",  # exaone
            "language_model.model.layers.{bid}.self_attn.v_proj",  # llama4
        ),
        # Attention output
        MODEL_TENSOR.ATTN_OUT: (
            "gpt_neox.layers.{bid}.attention.dense",  # gptneox
            "transformer.h.{bid}.attn.c_proj",  # gpt2 refact qwen jais
            "transformer.blocks.{bid}.attn.out_proj",  # mpt
            "transformer.h.{bid}.self_attention.dense",  # falcon
            "h.{bid}.self_attention.dense",  # bloom
            "model.layers.{bid}.self_attn.o_proj",  # llama-hf nemotron olmoe olmo2 phimoe
            "model.layers.{bid}.self_attn.linear_attn",  # deci
            "layers.{bid}.attention.wo",  # llama-pth
            "encoder.layer.{bid}.attention.output.dense",  # bert
            "transformer.h.{bid}.attn.out_proj",  # gpt-j
            "language_model.encoder.layers.{bid}.self_attention.dense",  # persimmon
            "model.layers.{bid}.self_attn.dense",  # persimmon
            "h.{bid}.attn.c_proj",  # gpt2
            "transformer.h.{bid}.mixer.out_proj",  # phi2
            "model.layers.layers.{bid}.self_attn.o_proj",  # plamo
            "model.layers.{bid}.attention.wo",  # internlm2
            "encoder.layers.{bid}.attn.out_proj",  # nomic-bert
            "transformer.decoder_layer.{bid}.multi_head_attention.linear",  # Grok
            "transformer.blocks.{bid}.norm_attn_norm.attn.out_proj",  # dbrx
            "encoder.layers.{bid}.self_attention.dense",  # chatglm
            "transformer.layers.{bid}.attn.out_proj",  # openelm
            "transformer.h.{bid}.attn.attention.out_proj",  # exaone
            "language_model.model.layers.{bid}.self_attn.o_proj",  # llama4
        ),
        # Attention output norm
        MODEL_TENSOR.ATTN_OUT_NORM: (
            "encoder.layer.{bid}.attention.output.LayerNorm",  # bert
            "encoder.layers.{bid}.norm1",  # nomic-bert
            "transformer.decoder_layer.{bid}.rms_norm_1",  # Grok
            "transformer.blocks.{bid}.norm_attn_norm.norm_2",  # dbrx
        ),
        MODEL_TENSOR.ATTN_POST_NORM: (
            "model.layers.{bid}.post_attention_layernorm",  # gemma2 olmo2
            "model.layers.{bid}.post_self_attn_layernorm",  # glm-4-0414
        ),
        # Rotary embeddings
        MODEL_TENSOR.ATTN_ROT_EMBD: (
            "model.layers.{bid}.self_attn.rotary_emb.inv_freq",  # llama-hf
            "layers.{bid}.attention.inner_attention.rope.freqs",  # llama-pth
            "model.layers.layers.{bid}.self_attn.rotary_emb.inv_freq",  # plamo
            "transformer.h.{bid}.attn.rotary_emb.inv_freq",  # codeshell
        ),
        # Feed-forward norm
        MODEL_TENSOR.FFN_NORM: (
            "gpt_neox.layers.{bid}.post_attention_layernorm",  # gptneox
            "transformer.h.{bid}.ln_2",  # gpt2 refact qwen jais exaone
            "h.{bid}.post_attention_layernorm",  # bloom
            "transformer.blocks.{bid}.norm_2",  # mpt
            "model.layers.{bid}.post_attention_layernorm",  # llama-hf nemotron olmoe phimoe
            "layers.{bid}.ffn_norm",  # llama-pth
            "language_model.encoder.layers.{bid}.post_attention_layernorm",  # persimmon
            "model.layers.{bid}.ln2",  # yi
            "h.{bid}.ln_2",  # gpt2
            "model.layers.{bid}.ffn_norm",  # internlm2
            "transformer.decoder_layer.{bid}.rms_norm_2",  # Grok
            "encoder.layers.{bid}.post_attention_layernorm",  # chatglm
            "transformer.layers.{bid}.ffn_norm",  # openelm
            "language_model.model.layers.{bid}.post_attention_layernorm",  # llama4
        ),
        # Pre feed-forward norm
        MODEL_TENSOR.FFN_PRE_NORM: (
            "model.layers.{bid}.pre_feedforward_layernorm",  # gemma2
        ),
        # Post feed-forward norm
        MODEL_TENSOR.FFN_POST_NORM: (
            "model.layers.{bid}.post_feedforward_layernorm",  # gemma2 olmo2
            "model.layers.{bid}.post_mlp_layernorm",  # glm-4-0414
        ),
        MODEL_TENSOR.FFN_GATE_INP: (
            "layers.{bid}.feed_forward.gate",  # mixtral
            "model.layers.{bid}.block_sparse_moe.gate",  # mixtral phimoe
            "model.layers.{bid}.mlp.gate",  # qwen2moe olmoe
            "transformer.decoder_layer.{bid}.router",  # Grok
            "transformer.blocks.{bid}.ffn.router.layer",  # dbrx
            "model.layers.{bid}.block_sparse_moe.router.layer",  # granitemoe
            "language_model.model.layers.{bid}.feed_forward.router",  # llama4
        ),
        MODEL_TENSOR.FFN_GATE_INP_SHEXP: (
            "model.layers.{bid}.mlp.shared_expert_gate",  # qwen2moe
        ),
        MODEL_TENSOR.FFN_EXP_PROBS_B: (
            "model.layers.{bid}.mlp.gate.e_score_correction",  # deepseek-v3
        ),
        # Feed-forward up
        MODEL_TENSOR.FFN_UP: (
            "gpt_neox.layers.{bid}.mlp.dense_h_to_4h",  # gptneox
            "transformer.h.{bid}.mlp.c_fc",  # gpt2 jais
            "transformer.blocks.{bid}.ffn.up_proj",  # mpt
            "transformer.h.{bid}.mlp.dense_h_to_4h",  # falcon
            "h.{bid}.mlp.dense_h_to_4h",  # bloom
            "model.layers.{bid}.mlp.up_proj",  # llama-hf refact nemotron olmo2
            "layers.{bid}.feed_forward.w3",  # llama-pth
            "encoder.layer.{bid}.intermediate.dense",  # bert
            "transformer.h.{bid}.mlp.fc_in",  # gpt-j
            "transformer.h.{bid}.mlp.linear_3",  # refact
            "language_model.encoder.layers.{bid}.mlp.dense_h_to_4h",  # persimmon
            "model.layers.{bid}.mlp.dense_h_to_4h",  # persimmon
            "transformer.h.{bid}.mlp.w1",  # qwen
            "h.{bid}.mlp.c_fc",  # gpt2
            "transformer.h.{bid}.mlp.fc1",  # phi2
            "model.layers.{bid}.mlp.fc1",  # phi2
            "model.layers.{bid}.mlp.gate_up_proj",  # phi3 glm-4-0414
            "model.layers.layers.{bid}.mlp.up_proj",  # plamo
            "model.layers.{bid}.feed_forward.w3",  # internlm2
            "encoder.layers.{bid}.mlp.fc11",  # nomic-bert
            "model.layers.{bid}.mlp.c_fc",  # starcoder2
            "encoder.layer.{bid}.mlp.gated_layers_v",  # jina-bert-v2
            "model.layers.{bid}.residual_mlp.w3",  # arctic
            "encoder.layers.{bid}.mlp.dense_h_to_4h",  # chatglm
            "transformer.h.{bid}.mlp.c_fc_1",  # exaone
            "language_model.model.layers.{bid}.feed_forward.up_proj",  # llama4
        ),
        MODEL_TENSOR.FFN_UP_EXP: (
            "layers.{bid}.feed_forward.experts.w3",  # mixtral (merged)
            "transformer.decoder_layer.{bid}.moe.linear_v",  # Grok (merged)
            "transformer.blocks.{bid}.ffn.experts.mlp.v1",  # dbrx
            "model.layers.{bid}.mlp.experts.up_proj",  # qwen2moe olmoe (merged)
            "model.layers.{bid}.block_sparse_moe.experts.w3",  # phimoe (merged)
            "language_model.model.layers.{bid}.feed_forward.experts.up_proj",  # llama4
        ),
        MODEL_TENSOR.FFN_UP_SHEXP: (
            "model.layers.{bid}.mlp.shared_expert.up_proj",  # qwen2moe
            "model.layers.{bid}.mlp.shared_experts.up_proj",  # deepseek deepseek2
            "language_model.model.layers.{bid}.feed_forward.shared_expert.up_proj",  # llama4
        ),
        # AWQ-activation gate
        MODEL_TENSOR.FFN_ACT: (
            "transformer.blocks.{bid}.ffn.act",  # mpt
        ),
        # Feed-forward gate
        MODEL_TENSOR.FFN_GATE: (
            "model.layers.{bid}.mlp.gate_proj",  # llama-hf refact olmo2
            "layers.{bid}.feed_forward.w1",  # llama-pth
            "transformer.h.{bid}.mlp.w2",  # qwen
            "transformer.h.{bid}.mlp.c_fc2",  # jais
            "model.layers.layers.{bid}.mlp.gate_proj",  # plamo
            "model.layers.{bid}.feed_forward.w1",  # internlm2
            "encoder.layers.{bid}.mlp.fc12",  # nomic-bert
            "encoder.layer.{bid}.mlp.gated_layers_w",  # jina-bert-v2
            "transformer.h.{bid}.mlp.linear_1",  # refact
            "model.layers.{bid}.residual_mlp.w1",  # arctic
            "transformer.h.{bid}.mlp.c_fc_0",  # exaone
            "language_model.model.layers.{bid}.feed_forward.gate_proj",  # llama4
        ),
        MODEL_TENSOR.FFN_GATE_EXP: (
            "layers.{bid}.feed_forward.experts.w1",  # mixtral (merged)
            "transformer.decoder_layer.{bid}.moe.linear",  # Grok (merged)
            "transformer.blocks.{bid}.ffn.experts.mlp.w1",  # dbrx
            "model.layers.{bid}.mlp.experts.gate_proj",  # qwen2moe olmoe (merged)
            "model.layers.{bid}.block_sparse_moe.experts.w1",  # phimoe (merged)
            "language_model.model.layers.{bid}.feed_forward.experts.gate_proj",  # llama4
        ),
        MODEL_TENSOR.FFN_GATE_SHEXP: (
            "model.layers.{bid}.mlp.shared_expert.gate_proj",  # qwen2moe
            "model.layers.{bid}.mlp.shared_experts.gate_proj",  # deepseek deepseek2
            "language_model.model.layers.{bid}.feed_forward.shared_expert.gate_proj",  # llama4
        ),
        # Feed-forward down
        MODEL_TENSOR.FFN_DOWN: (
            "gpt_neox.layers.{bid}.mlp.dense_4h_to_h",  # gptneox
            "transformer.h.{bid}.mlp.c_proj",  # gpt2 refact qwen jais
            "transformer.blocks.{bid}.ffn.down_proj",  # mpt
            "transformer.h.{bid}.mlp.dense_4h_to_h",  # falcon
            "h.{bid}.mlp.dense_4h_to_h",  # bloom
            "model.layers.{bid}.mlp.down_proj",  # llama-hf nemotron olmo2
            "layers.{bid}.feed_forward.w2",  # llama-pth
            "encoder.layer.{bid}.output.dense",  # bert
            "transformer.h.{bid}.mlp.fc_out",  # gpt-j
            "language_model.encoder.layers.{bid}.mlp.dense_4h_to_h",  # persimmon
            "model.layers.{bid}.mlp.dense_4h_to_h",  # persimmon
            "h.{bid}.mlp.c_proj",  # gpt2
            "transformer.h.{bid}.mlp.fc2",  # phi2
            "model.layers.{bid}.mlp.fc2",  # phi2
            "model.layers.layers.{bid}.mlp.down_proj",  # plamo
            "model.layers.{bid}.feed_forward.w2",  # internlm2
            "encoder.layers.{bid}.mlp.fc2",  # nomic-bert
            "model.layers.{bid}.mlp.c_proj",  # starcoder2
            "encoder.layer.{bid}.mlp.wo",  # jina-bert-v2
            "transformer.layers.{bid}.ffn.proj_2",  # openelm
            "model.layers.{bid}.residual_mlp.w2",  # arctic
            "encoder.layer.{bid}.mlp.down_layer",  # jina-bert-v2
            "encoder.layers.{bid}.mlp.dense_4h_to_h",  # chatglm
            "model.layers.h.{bid}.mlp.c_proj",  # exaone
            "language_model.model.layers.{bid}.feed_forward.down_proj",  # llama4
        ),
        MODEL_TENSOR.FFN_DOWN_EXP: (
            "layers.{bid}.feed_forward.experts.w2",  # mixtral (merged)
            "transformer.decoder_layer.{bid}.moe.linear_1",  # Grok (merged)
            "transformer.blocks.{bid}.ffn.experts.mlp.w2",  # dbrx
            "model.layers.{bid}.mlp.experts.down_proj",  # qwen2moe olmoe (merged)
            "model.layers.{bid}.block_sparse_moe.output_linear",  # granitemoe
            "model.layers.{bid}.block_sparse_moe.experts.w2",  # phimoe (merged)
            "language_model.model.layers.{bid}.feed_forward.experts.down_proj",  # llama4
        ),
        MODEL_TENSOR.FFN_DOWN_SHEXP: (
            "model.layers.{bid}.mlp.shared_expert.down_proj",  # qwen2moe
            "model.layers.{bid}.mlp.shared_experts.down_proj",  # deepseek deepseek2
            "language_model.model.layers.{bid}.feed_forward.shared_expert.down_proj",  # llama4
        ),
        MODEL_TENSOR.ATTN_Q_NORM: (
            "language_model.encoder.layers.{bid}.self_attention.q_layernorm",
            "model.layers.{bid}.self_attn.q_layernorm",  # persimmon
            "model.layers.{bid}.self_attn.q_norm",  # cohere olmoe chameleon olmo2
            "transformer.blocks.{bid}.attn.q_ln",  # sea-lion
            "encoder.layer.{bid}.attention.self.layer_norm_q",  # jina-bert-v2
            "transformer.layers.{bid}.attn.q_norm",  # openelm
        ),
        MODEL_TENSOR.ATTN_K_NORM: (
            "language_model.encoder.layers.{bid}.self_attention.k_layernorm",
            "model.layers.{bid}.self_attn.k_layernorm",  # persimmon
            "model.layers.{bid}.self_attn.k_norm",  # cohere olmoe chameleon olmo2
            "transformer.blocks.{bid}.attn.k_ln",  # sea-lion
            "encoder.layer.{bid}.attention.self.layer_norm_k",  # jina-bert-v2
            "transformer.layers.{bid}.attn.k_norm",  # openelm
        ),
        MODEL_TENSOR.ROPE_FREQS: (
            "language_model.encoder.layers.{bid}.self_attention.rotary_emb.inv_freq",  # persimmon
        ),
        MODEL_TENSOR.LAYER_OUT_NORM: (
            "encoder.layer.{bid}.output.LayerNorm",  # bert
            "encoder.layers.{bid}.norm2",  # nomic-bert
            "transformer.decoder_layer.{bid}.rms_norm_3",  # Grok
            "encoder.layer.{bid}.mlp.layernorm",  # jina-bert-v2
            "encoder.layer.{bid}.layer_norm_2",  # jina-v2-code
        ),
        MODEL_TENSOR.SSM_IN: (
            "model.layers.{bid}.in_proj",
            "backbone.layers.{bid}.mixer.in_proj",
        ),
        MODEL_TENSOR.SSM_CONV1D: (
            "model.layers.{bid}.conv1d",
            "backbone.layers.{bid}.mixer.conv1d",
        ),
        MODEL_TENSOR.SSM_X: (
            "model.layers.{bid}.x_proj",
            "backbone.layers.{bid}.mixer.x_proj",
        ),
        MODEL_TENSOR.SSM_DT: (
            "model.layers.{bid}.dt_proj",
            "backbone.layers.{bid}.mixer.dt_proj",
        ),
        MODEL_TENSOR.SSM_A: (
            "model.layers.{bid}.A_log",
            "backbone.layers.{bid}.mixer.A_log",
        ),
        MODEL_TENSOR.SSM_D: (
            "model.layers.{bid}.D",
            "backbone.layers.{bid}.mixer.D",
        ),
        MODEL_TENSOR.SSM_OUT: (
            "model.layers.{bid}.out_proj",
            "backbone.layers.{bid}.mixer.out_proj",
        ),
        MODEL_TENSOR.TIME_MIX_W0: (
            "model.layers.{bid}.attention.w0",  # rwkv7
        ),
        MODEL_TENSOR.TIME_MIX_W1: (
            "rwkv.blocks.{bid}.attention.time_maa_w1",  # rwkv6
            "model.layers.{bid}.self_attn.time_maa_w1",  # rwkv6qwen2
            "model.layers.{bid}.attention.w1",  # rwkv7
        ),
        MODEL_TENSOR.TIME_MIX_W2: (
            "rwkv.blocks.{bid}.attention.time_maa_w2",  # rwkv6
            "model.layers.{bid}.self_attn.time_maa_w2",  # rwkv6qwen2
            "model.layers.{bid}.attention.w2",  # rwkv7
        ),
        MODEL_TENSOR.TIME_MIX_A0: (
            "model.layers.{bid}.attention.a0",  # rwkv7
        ),
        MODEL_TENSOR.TIME_MIX_A1: (
            "model.layers.{bid}.attention.a1",  # rwkv7
        ),
        MODEL_TENSOR.TIME_MIX_A2: (
            "model.layers.{bid}.attention.a2",  # rwkv7
        ),
        MODEL_TENSOR.TIME_MIX_V0: (
            "model.layers.{bid}.attention.v0",  # rwkv7
        ),
        MODEL_TENSOR.TIME_MIX_V1: (
            "model.layers.{bid}.attention.v1",  # rwkv7
        ),
        MODEL_TENSOR.TIME_MIX_V2: (
            "model.layers.{bid}.attention.v2",  # rwkv7
        ),
        MODEL_TENSOR.TIME_MIX_G1: (
            "model.layers.{bid}.attention.g1",  # rwkv7
        ),
        MODEL_TENSOR.TIME_MIX_G2: (
            "model.layers.{bid}.attention.g2",  # rwkv7
        ),
        MODEL_TENSOR.TIME_MIX_K_K: (
            "model.layers.{bid}.attention.k_k",  # rwkv7
        ),
        MODEL_TENSOR.TIME_MIX_K_A: (
            "model.layers.{bid}.attention.k_a",  # rwkv7
        ),
        MODEL_TENSOR.TIME_MIX_R_K: (
            "model.layers.{bid}.attention.r_k",  # rwkv7
        ),
        MODEL_TENSOR.TIME_MIX_LERP_X: (
            "rwkv.blocks.{bid}.attention.time_maa_x",  # rwkv6
            "model.layers.{bid}.self_attn.time_maa_x",  # rwkv6qwen2
        ),
        MODEL_TENSOR.TIME_MIX_LERP_K: (
            "rwkv.blocks.{bid}.attention.time_maa_k",  # rwkv6
            "model.layers.{bid}.self_attn.time_maa_k",  # rwkv6qwen2
        ),
        MODEL_TENSOR.TIME_MIX_LERP_V: (
            "rwkv.blocks.{bid}.attention.time_maa_v",  # rwkv6
            "model.layers.{bid}.self_attn.time_maa_v",  # rwkv6qwen2
        ),
        MODEL_TENSOR.TIME_MIX_LERP_R: (
            "rwkv.blocks.{bid}.attention.time_maa_r",  # rwkv6
            "model.layers.{bid}.self_attn.time_maa_r",  # rwkv6qwen2
        ),
        MODEL_TENSOR.TIME_MIX_LERP_G: (
            "rwkv.blocks.{bid}.attention.time_maa_g",  # rwkv6
            "model.layers.{bid}.self_attn.time_maa_g",  # rwkv6qwen2
        ),
        MODEL_TENSOR.TIME_MIX_LERP_W: (
            "rwkv.blocks.{bid}.attention.time_maa_w",  # rwkv6
            "model.layers.{bid}.self_attn.time_maa_w",  # rwkv6qwen2
        ),
        MODEL_TENSOR.TIME_MIX_FIRST: (
            "rwkv.blocks.{bid}.attention.time_faaaa",  # rwkv6
        ),
        MODEL_TENSOR.TIME_MIX_DECAY: (
            "rwkv.blocks.{bid}.attention.time_decay",  # rwkv6
            "model.layers.{bid}.self_attn.time_decay",  # rwkv6qwen2
        ),
        MODEL_TENSOR.TIME_MIX_DECAY_W1: (
            "rwkv.blocks.{bid}.attention.time_decay_w1",  # rwkv6
            "model.layers.{bid}.self_attn.time_decay_w1",  # rwkv6qwen2
        ),
        MODEL_TENSOR.TIME_MIX_DECAY_W2: (
            "rwkv.blocks.{bid}.attention.time_decay_w2",  # rwkv6
            "model.layers.{bid}.self_attn.time_decay_w2",  # rwkv6qwen2
        ),
        MODEL_TENSOR.TIME_MIX_KEY: (
            "rwkv.blocks.{bid}.attention.key",  # rwkv6
            "model.layers.{bid}.self_attn.k_proj",  # rwkv6qwen2
            "model.layers.{bid}.attention.key",  # rwkv7
            "model.layers.{bid}.attention.k_proj",  # rwkv7
        ),
        MODEL_TENSOR.TIME_MIX_VALUE: (
            "rwkv.blocks.{bid}.attention.value",  # rwkv6
            "model.layers.{bid}.self_attn.v_proj",  # rwkv6qwen2
            "model.layers.{bid}.attention.value",  # rwkv7
            "model.layers.{bid}.attention.v_proj",  # rwkv7
        ),
        MODEL_TENSOR.TIME_MIX_RECEPTANCE: (
            "rwkv.blocks.{bid}.attention.receptance",  # rwkv6
            "model.layers.{bid}.self_attn.q_proj",  # rwkv6qwen2
            "model.layers.{bid}.attention.receptance",  # rwkv7
            "model.layers.{bid}.attention.r_proj",  # rwkv7
        ),
        MODEL_TENSOR.TIME_MIX_GATE: (
            "rwkv.blocks.{bid}.attention.gate",  # rwkv6
            "model.layers.{bid}.self_attn.gate",  # rwkv6qwen2
        ),
        MODEL_TENSOR.TIME_MIX_LN: (
            "rwkv.blocks.{bid}.attention.ln_x",  # rwkv6
            "model.layers.{bid}.attention.ln_x",  # rwkv7
        ),
        MODEL_TENSOR.TIME_MIX_OUTPUT: (
            "rwkv.blocks.{bid}.attention.output",  # rwkv6
            "model.layers.{bid}.self_attn.o_proj",  # rwkv6qwen2
            "model.layers.{bid}.attention.output",  # rwkv7
            "model.layers.{bid}.attention.o_proj",  # rwkv7
        ),
        MODEL_TENSOR.CHANNEL_MIX_LERP_K: (
            "rwkv.blocks.{bid}.feed_forward.time_maa_k",  # rwkv6
            "model.layers.{bid}.feed_forward.x_k",  # rwkv7
        ),
        MODEL_TENSOR.CHANNEL_MIX_LERP_R: (
            "rwkv.blocks.{bid}.feed_forward.time_maa_r",  # rwkv6
        ),
        MODEL_TENSOR.CHANNEL_MIX_KEY: (
            "rwkv.blocks.{bid}.feed_forward.key",  # rwkv6
            "model.layers.{bid}.feed_forward.key",  # rwkv7
        ),
        MODEL_TENSOR.CHANNEL_MIX_RECEPTANCE: (
            "rwkv.blocks.{bid}.feed_forward.receptance",  # rwkv6
        ),
        MODEL_TENSOR.CHANNEL_MIX_VALUE: (
            "rwkv.blocks.{bid}.feed_forward.value",  # rwkv6
            "model.layers.{bid}.feed_forward.value",  # rwkv7
        ),
        MODEL_TENSOR.ATTN_Q_A: (
            "model.layers.{bid}.self_attn.q_a_proj",  # deepseek2
        ),
        MODEL_TENSOR.ATTN_Q_B: (
            "model.layers.{bid}.self_attn.q_b_proj",  # deepseek2
        ),
        MODEL_TENSOR.ATTN_KV_A_MQA: (
            "model.layers.{bid}.self_attn.kv_a_proj_with_mqa",  # deepseek2
        ),
        MODEL_TENSOR.ATTN_KV_B: (
            "model.layers.{bid}.self_attn.kv_b_proj",  # deepseek2
        ),
        MODEL_TENSOR.ATTN_K_B: (
            "model.layers.{bid}.self_attn.k_b_proj",  # deepseek2
        ),
        MODEL_TENSOR.ATTN_V_B: (
            "model.layers.{bid}.self_attn.v_b_proj",  # deepseek2
        ),
        MODEL_TENSOR.ATTN_Q_A_NORM: (
            "model.layers.{bid}.self_attn.q_a_layernorm",  # deepseek2
        ),
        MODEL_TENSOR.ATTN_KV_A_NORM: (
            "model.layers.{bid}.self_attn.kv_a_layernorm",  # deepseek2
        ),
        MODEL_TENSOR.ATTN_SUB_NORM: (
            "model.layers.{bid}.self_attn.inner_attn_ln",  # bitnet
        ),
        MODEL_TENSOR.FFN_SUB_NORM: (
            "model.layers.{bid}.mlp.ffn_layernorm",  # bitnet
        ),
        MODEL_TENSOR.DEC_ATTN_NORM: (
            "decoder.block.{bid}.layer.0.layer_norm",  # t5
        ),
        MODEL_TENSOR.DEC_ATTN_Q: (
            "decoder.block.{bid}.layer.0.SelfAttention.q",  # t5
        ),
        MODEL_TENSOR.DEC_ATTN_K: (
            "decoder.block.{bid}.layer.0.SelfAttention.k",  # t5
        ),
        MODEL_TENSOR.DEC_ATTN_V: (
            "decoder.block.{bid}.layer.0.SelfAttention.v",  # t5
        ),
        MODEL_TENSOR.DEC_ATTN_OUT: (
            "decoder.block.{bid}.layer.0.SelfAttention.o",  # t5
        ),
        MODEL_TENSOR.DEC_ATTN_REL_B: (
            "decoder.block.{bid}.layer.0.SelfAttention.relative_attention_bias",  # t5
        ),
        MODEL_TENSOR.DEC_CROSS_ATTN_NORM: (
            "decoder.block.{bid}.layer.1.layer_norm",  # t5
        ),
        MODEL_TENSOR.DEC_CROSS_ATTN_Q: (
            "decoder.block.{bid}.layer.1.EncDecAttention.q",  # t5
        ),
        MODEL_TENSOR.DEC_CROSS_ATTN_K: (
            "decoder.block.{bid}.layer.1.EncDecAttention.k",  # t5
        ),
        MODEL_TENSOR.DEC_CROSS_ATTN_V: (
            "decoder.block.{bid}.layer.1.EncDecAttention.v",  # t5
        ),
        MODEL_TENSOR.DEC_CROSS_ATTN_OUT: (
            "decoder.block.{bid}.layer.1.EncDecAttention.o",  # t5
        ),
        MODEL_TENSOR.DEC_CROSS_ATTN_REL_B: (
            "decoder.block.{bid}.layer.1.EncDecAttention.relative_attention_bias",  # t5
        ),
        MODEL_TENSOR.DEC_FFN_NORM: (
            "decoder.block.{bid}.layer.2.layer_norm",  # t5
        ),
        MODEL_TENSOR.DEC_FFN_GATE: (
            "decoder.block.{bid}.layer.2.DenseReluDense.wi_0",  # flan-t5
        ),
        MODEL_TENSOR.DEC_FFN_UP: (
            "decoder.block.{bid}.layer.2.DenseReluDense.wi",  # t5
            "decoder.block.{bid}.layer.2.DenseReluDense.wi_1",  # flan-t5
        ),
        MODEL_TENSOR.DEC_FFN_DOWN: (
            "decoder.block.{bid}.layer.2.DenseReluDense.wo",  # t5
        ),
        MODEL_TENSOR.DEC_OUTPUT_NORM: (
            "decoder.final_layer_norm",  # t5
        ),
        MODEL_TENSOR.ENC_ATTN_NORM: (
            "encoder.block.{bid}.layer.0.layer_norm",  # t5
        ),
        MODEL_TENSOR.ENC_ATTN_Q: (
            "encoder.block.{bid}.layer.0.SelfAttention.q",  # t5
        ),
        MODEL_TENSOR.ENC_ATTN_K: (
            "encoder.block.{bid}.layer.0.SelfAttention.k",  # t5
        ),
        MODEL_TENSOR.ENC_ATTN_V: (
            "encoder.block.{bid}.layer.0.SelfAttention.v",  # t5
        ),
        MODEL_TENSOR.ENC_ATTN_OUT: (
            "encoder.block.{bid}.layer.0.SelfAttention.o",  # t5
        ),
        MODEL_TENSOR.ENC_ATTN_REL_B: (
            "encoder.block.{bid}.layer.0.SelfAttention.relative_attention_bias",  # t5
        ),
        MODEL_TENSOR.ENC_FFN_NORM: (
            "encoder.block.{bid}.layer.1.layer_norm",  # t5
        ),
        MODEL_TENSOR.ENC_FFN_GATE: (
            "encoder.block.{bid}.layer.1.DenseReluDense.wi_0",  # flan-t5
        ),
        MODEL_TENSOR.ENC_FFN_UP: (
            "encoder.block.{bid}.layer.1.DenseReluDense.wi",  # t5
            "encoder.block.{bid}.layer.1.DenseReluDense.wi_1",  # flan-t5
        ),
        MODEL_TENSOR.ENC_FFN_DOWN: (
            "encoder.block.{bid}.layer.1.DenseReluDense.wo",  # t5
        ),
        ############################################################################
        # TODO: these do not belong to block_mappings_cfg - move them to mappings_cfg
        MODEL_TENSOR.ENC_OUTPUT_NORM: (
            "encoder.final_layer_norm",  # t5
        ),
        MODEL_TENSOR.CLS: (
            "classifier",  # jina
            "classifier.dense",  # roberta
        ),
        MODEL_TENSOR.CLS_OUT: (
            "classifier.out_proj",  # roberta
        ),
        #############################################################################
        MODEL_TENSOR.CONVNEXT_DW: (
            "backbone.convnext.{bid}.dwconv",  # wavtokenizer
        ),
        MODEL_TENSOR.CONVNEXT_NORM: (
            "backbone.convnext.{bid}.norm",  # wavtokenizer
        ),
        MODEL_TENSOR.CONVNEXT_PW1: (
            "backbone.convnext.{bid}.pwconv1",  # wavtokenizer
        ),
        MODEL_TENSOR.CONVNEXT_PW2: (
            "backbone.convnext.{bid}.pwconv2",  # wavtokenizer
        ),
        MODEL_TENSOR.CONVNEXT_GAMMA: (
            "backbone.convnext.{bid}.gamma",  # wavtokenizer
        ),
        MODEL_TENSOR.POSNET_CONV1: (
            "backbone.posnet.{bid}.conv1",  # wavtokenizer
        ),
        MODEL_TENSOR.POSNET_CONV2: (
            "backbone.posnet.{bid}.conv2",  # wavtokenizer
        ),
        MODEL_TENSOR.POSNET_NORM: (
            "backbone.posnet.{bid}.norm",  # wavtokenizer
        ),
        MODEL_TENSOR.POSNET_NORM1: (
            "backbone.posnet.{bid}.norm1",  # wavtokenizer
        ),
        MODEL_TENSOR.POSNET_NORM2: (
            "backbone.posnet.{bid}.norm2",  # wavtokenizer
        ),
        MODEL_TENSOR.POSNET_ATTN_NORM: (
            "backbone.posnet.{bid}.norm",  # wavtokenizer
        ),
        MODEL_TENSOR.POSNET_ATTN_Q: (
            "backbone.posnet.{bid}.q",  # wavtokenizer
        ),
        MODEL_TENSOR.POSNET_ATTN_K: (
            "backbone.posnet.{bid}.k",  # wavtokenizer
        ),
        MODEL_TENSOR.POSNET_ATTN_V: (
            "backbone.posnet.{bid}.v",  # wavtokenizer
        ),
        MODEL_TENSOR.POSNET_ATTN_OUT: (
            "backbone.posnet.{bid}.proj_out",  # wavtokenizer
        ),
    }
    # architecture-specific block mappings
    arch_block_mappings_cfg: dict[MODEL_ARCH, dict[MODEL_TENSOR, tuple[str, ...]]] = {
        MODEL_ARCH.ARCTIC: {
            MODEL_TENSOR.FFN_NORM: (
                "model.layers.{bid}.residual_layernorm",
            ),
            MODEL_TENSOR.FFN_NORM_EXP: (
                "model.layers.{bid}.post_attention_layernorm",
            ),
        },
    }
    mapping: dict[str, tuple[MODEL_TENSOR, str]]

    def __init__(self, arch: MODEL_ARCH, n_blocks: int):
        self.mapping = {}
        for tensor, keys in self.mappings_cfg.items():
            if tensor not in MODEL_TENSORS[arch]:
                continue
            tensor_name = TENSOR_NAMES[tensor]
            self.mapping[tensor_name] = (tensor, tensor_name)
            for key in keys:
                self.mapping[key] = (tensor, tensor_name)
        if arch in self.arch_block_mappings_cfg:
            self.block_mappings_cfg.update(self.arch_block_mappings_cfg[arch])
        for bid in range(n_blocks):
            for tensor, keys in self.block_mappings_cfg.items():
                if tensor not in MODEL_TENSORS[arch]:
                    continue
                tensor_name = TENSOR_NAMES[tensor].format(bid = bid)
                self.mapping[tensor_name] = (tensor, tensor_name)
                for key in keys:
                    key = key.format(bid = bid)
                    self.mapping[key] = (tensor, tensor_name)
    def get_type_and_name(self, key: str, try_suffixes: Sequence[str] = ()) -> tuple[MODEL_TENSOR, str] | None:
        result = self.mapping.get(key)
        if result is not None:
            return result
        for suffix in try_suffixes:
            if key.endswith(suffix):
                result = self.mapping.get(key[:-len(suffix)])
                if result is not None:
                    return result[0], result[1] + suffix
        return None

    def get_name(self, key: str, try_suffixes: Sequence[str] = ()) -> str | None:
        result = self.get_type_and_name(key, try_suffixes = try_suffixes)
        if result is None:
            return None
        return result[1]

    def get_type(self, key: str, try_suffixes: Sequence[str] = ()) -> MODEL_TENSOR | None:
        result = self.get_type_and_name(key, try_suffixes = try_suffixes)
        if result is None:
            return None
        return result[0]

    def __getitem__(self, key: str) -> str:
        try:
            return self.mapping[key][1]
        except KeyError:
            raise KeyError(key)

    def __contains__(self, key: str) -> bool:
        return key in self.mapping

    def __repr__(self) -> str:
        return repr(self.mapping)


def get_tensor_name_map(arch: MODEL_ARCH, n_blocks: int) -> TensorNameMap:
    return TensorNameMap(arch, n_blocks)
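

# Illustrative usage sketch (not part of the original module). It assumes
# MODEL_ARCH.LLAMA is defined in .constants, a 32-block model, and HF-style
# checkpoint tensor names; the expected results in the comments come from
# TENSOR_NAMES and may differ if those constants change.
#
#     tmap = get_tensor_name_map(MODEL_ARCH.LLAMA, 32)
#
#     # Exact lookup of a per-block tensor name:
#     tmap.get_name("model.layers.0.self_attn.q_proj")   # -> e.g. "blk.0.attn_q"
#
#     # ".weight"/".bias" suffixes are stripped for the lookup and re-appended:
#     tmap.get_name("model.embed_tokens.weight",
#                   try_suffixes=(".weight", ".bias"))    # -> e.g. "token_embd.weight"
#
#     # Names with no mapping simply return None:
#     tmap.get_name("model.layers.0.not_a_tensor")        # -> None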