# tensor_mapping.py
from __future__ import annotations

from typing import Sequence

from .constants import MODEL_ARCH, MODEL_TENSOR, MODEL_TENSORS, TENSOR_NAMES


class TensorNameMap:
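    """Map model-specific checkpoint tensor names to canonical GGUF names.

    Each MODEL_TENSOR key lists every source name that a supported model
    family uses for that logical tensor; the trailing comments record which
    family each name comes from.
    """
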
    mappings_cfg: dict[MODEL_TENSOR, tuple[str, ...]] = {
        # Token embeddings
        MODEL_TENSOR.TOKEN_EMBD: (
            "gpt_neox.embed_in",  # gptneox
            "transformer.wte",  # gpt2 gpt-j mpt refact qwen dbrx jais exaone
            "transformer.word_embeddings",  # falcon
            "word_embeddings",  # bloom
            "model.embed_tokens",  # llama-hf nemotron olmoe olmo2 rwkv6qwen2 glm4-0414
            "tok_embeddings",  # llama-pth
            "embeddings.word_embeddings",  # bert nomic-bert
            "language_model.embedding.word_embeddings",  # persimmon
            "wte",  # gpt2
            "transformer.embd.wte",  # phi2
            "model.tok_embeddings",  # internlm2
            "model.embedding",  # mamba-qbert
            "backbone.embedding",  # mamba
            "backbone.embeddings",  # mamba-hf
            "transformer.in_out_embed",  # Grok
            "embedding.word_embeddings",  # chatglm
            "transformer.token_embeddings",  # openelm
            "shared",  # t5
            "rwkv.embeddings",  # rwkv6
            "model.embeddings",  # rwkv7
            "model.word_embeddings",  # bailingmoe
            "language_model.model.embed_tokens",  # llama4
            "encoder",  # neobert
        ),
        # Token type embeddings
        MODEL_TENSOR.TOKEN_TYPES: (
            "embeddings.token_type_embeddings",  # bert nomic-bert
        ),
        # Normalization of token embeddings
        MODEL_TENSOR.TOKEN_EMBD_NORM: (
            "word_embeddings_layernorm",  # bloom
            "embeddings.LayerNorm",  # bert
            "emb_ln",  # nomic-bert
            "transformer.norm",  # openelm
            "rwkv.blocks.0.pre_ln",  # rwkv rwkv6
            "model.pre_ln",  # rwkv7
            "model.layers.0.pre_norm",  # rwkv7
            "backbone.norm",  # wavtokenizer
        ),
        # Position embeddings
        MODEL_TENSOR.POS_EMBD: (
            "transformer.wpe",  # gpt2
            "embeddings.position_embeddings",  # bert
            "wpe",  # gpt2
        ),
        # Output
        MODEL_TENSOR.OUTPUT: (
            "embed_out",  # gptneox
            "lm_head",  # gpt2 mpt falcon llama-hf baichuan qwen mamba dbrx jais nemotron exaone olmoe olmo2 phimoe llama4
            "output",  # llama-pth bloom internlm2
            "word_embeddings_for_head",  # persimmon
            "lm_head.linear",  # phi2
            "output_layer",  # chatglm
            "head",  # rwkv
            "head.out",  # wavtokenizer
        ),
        # Output norm
        MODEL_TENSOR.OUTPUT_NORM: (
            "gpt_neox.final_layer_norm",  # gptneox
            "transformer.ln_f",  # gpt2 gpt-j falcon jais exaone
            "model.norm",  # llama-hf baichuan internlm2 olmoe olmo2 phimoe nemotron llama4
            "norm",  # llama-pth
            "transformer.norm_f",  # mpt dbrx
            "ln_f",  # refact bloom qwen gpt2
            "language_model.encoder.final_layernorm",  # persimmon
            "model.final_layernorm",  # persimmon
            "lm_head.ln",  # phi2
            "model.norm_f",  # mamba-qbert
            "backbone.norm_f",  # mamba
            "transformer.rms_norm",  # Grok
            "encoder.final_layernorm",  # chatglm
            "transformer.norm",  # openelm
            "rwkv.ln_out",  # rwkv6
            "model.ln_out",  # rwkv7
            "backbone.final_layer_norm",  # wavtokenizer
        ),
        # Rope frequencies
        MODEL_TENSOR.ROPE_FREQS: (
            "rope.freqs",  # llama-pth
            "rotary_pos_emb.inv_freq",  # chatglm
        ),
        MODEL_TENSOR.ROPE_FACTORS_LONG: (),
        MODEL_TENSOR.ROPE_FACTORS_SHORT: (),
        MODEL_TENSOR.CONV1D: (
            "backbone.embed",  # roberta
        ),
    }

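    # Block-level tensors: every name below contains a "{bid}" placeholder,
    # which __init__ expands to the concrete block (layer) index.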
    block_mappings_cfg: dict[MODEL_TENSOR, tuple[str, ...]] = {
        # Attention norm
        MODEL_TENSOR.ATTN_NORM: (
            "gpt_neox.layers.{bid}.input_layernorm",  # gptneox
            "transformer.h.{bid}.ln_1",  # gpt2 gpt-j refact qwen jais exaone
            "transformer.blocks.{bid}.norm_1",  # mpt
            "transformer.h.{bid}.input_layernorm",  # falcon7b
            "h.{bid}.input_layernorm",  # bloom
            "transformer.h.{bid}.ln_mlp",  # falcon40b
            "model.layers.{bid}.input_layernorm",  # llama-hf nemotron olmoe phimoe llama4
            "layers.{bid}.attention_norm",  # llama-pth
            "language_model.encoder.layers.{bid}.input_layernorm",  # persimmon
            "model.layers.{bid}.ln1",  # yi
            "h.{bid}.ln_1",  # gpt2
            "transformer.h.{bid}.ln",  # phi2
            "model.layers.layers.{bid}.norm",  # plamo
            "model.layers.{bid}.attention_norm",  # internlm2
            "model.layers.{bid}.norm",  # mamba-qbert
            "backbone.layers.{bid}.norm",  # mamba
            "transformer.decoder_layer.{bid}.rms_norm",  # Grok
            "transformer.blocks.{bid}.norm_attn_norm.norm_1",  # dbrx
            "encoder.layers.{bid}.input_layernorm",  # chatglm
            "transformer.layers.{bid}.attn_norm",  # openelm
            "rwkv.blocks.{bid}.ln1",  # rwkv6
            "model.layers.{bid}.ln1",  # rwkv7
            "transformer_encoder.{bid}.attention_norm",  # neobert
        ),
        # Attention norm 2
        MODEL_TENSOR.ATTN_NORM_2: (
            "transformer.h.{bid}.ln_attn",  # falcon40b
            "encoder.layer.{bid}.layer_norm_1",  # jina-v2-code
            "rwkv.blocks.{bid}.ln2",  # rwkv6
            "model.layers.{bid}.ln2",  # rwkv7
        ),
        # Attention query-key-value
        MODEL_TENSOR.ATTN_QKV: (
            "gpt_neox.layers.{bid}.attention.query_key_value",  # gptneox
            "transformer.h.{bid}.attn.c_attn",  # gpt2 qwen jais
            "transformer.blocks.{bid}.attn.Wqkv",  # mpt
            "transformer.blocks.{bid}.norm_attn_norm.attn.Wqkv",  # dbrx
            "transformer.h.{bid}.self_attention.query_key_value",  # falcon
            "h.{bid}.self_attention.query_key_value",  # bloom
            "language_model.encoder.layers.{bid}.self_attention.query_key_value",  # persimmon
            "model.layers.{bid}.self_attn.query_key_value",  # persimmon
            "h.{bid}.attn.c_attn",  # gpt2
            "transformer.h.{bid}.mixer.Wqkv",  # phi2
            "encoder.layers.{bid}.attn.Wqkv",  # nomic-bert
            "encoder.layers.{bid}.mixer.Wqkv",  # jina
            "model.layers.{bid}.self_attn.qkv_proj",  # phi3
            "encoder.layers.{bid}.self_attention.query_key_value",  # chatglm
            "transformer.layers.{bid}.attn.qkv_proj",  # openelm
            "transformer_encoder.{bid}.qkv",  # neobert
        ),
        # Attention query
        MODEL_TENSOR.ATTN_Q: (
            "model.layers.{bid}.self_attn.q_proj",  # llama-hf nemotron olmoe olmo2 phimoe llama4
            "model.layers.{bid}.self_attn.q_proj_no_perm",  # llama-custom
            "layers.{bid}.attention.wq",  # llama-pth
            "encoder.layer.{bid}.attention.self.query",  # bert
            "transformer.layer.{bid}.attention.q_lin",  # distillbert
            "transformer.h.{bid}.attn.q_proj",  # gpt-j
            "model.layers.layers.{bid}.self_attn.q_proj",  # plamo
            "model.layers.{bid}.attention.wq",  # internlm2
            "transformer.decoder_layer.{bid}.multi_head_attention.query",  # Grok
            "transformer.h.{bid}.attn.attention.q_proj",  # exaone
        ),
        # Attention key
        MODEL_TENSOR.ATTN_K: (
            "model.layers.{bid}.self_attn.k_proj",  # llama-hf nemotron olmoe olmo2 phimoe llama4
            "model.layers.{bid}.self_attn.k_proj_no_perm",  # llama-custom
            "layers.{bid}.attention.wk",  # llama-pth
            "encoder.layer.{bid}.attention.self.key",  # bert
            "transformer.layer.{bid}.attention.k_lin",  # distillbert
            "transformer.h.{bid}.attn.k_proj",  # gpt-j
            "transformer.h.{bid}.attn.k",  # refact
            "model.layers.layers.{bid}.self_attn.k_proj",  # plamo
            "model.layers.{bid}.attention.wk",  # internlm2
            "transformer.decoder_layer.{bid}.multi_head_attention.key",  # Grok
            "transformer.h.{bid}.attn.attention.k_proj",  # exaone
        ),
        # Attention value
        MODEL_TENSOR.ATTN_V: (
            "model.layers.{bid}.self_attn.v_proj",  # llama-hf nemotron olmoe olmo2 phimoe llama4
            "layers.{bid}.attention.wv",  # llama-pth
            "encoder.layer.{bid}.attention.self.value",  # bert
            "transformer.layer.{bid}.attention.v_lin",  # distillbert
            "transformer.h.{bid}.attn.v_proj",  # gpt-j
            "transformer.h.{bid}.attn.v",  # refact
            "model.layers.layers.{bid}.self_attn.v_proj",  # plamo
            "model.layers.{bid}.attention.wv",  # internlm2
            "transformer.decoder_layer.{bid}.multi_head_attention.value",  # Grok
            "transformer.h.{bid}.attn.attention.v_proj",  # exaone
        ),
        # Attention output
        MODEL_TENSOR.ATTN_OUT: (
            "gpt_neox.layers.{bid}.attention.dense",  # gptneox
            "transformer.h.{bid}.attn.c_proj",  # gpt2 refact qwen jais
            "transformer.blocks.{bid}.attn.out_proj",  # mpt
            "transformer.h.{bid}.self_attention.dense",  # falcon
            "h.{bid}.self_attention.dense",  # bloom
            "model.layers.{bid}.self_attn.o_proj",  # llama-hf nemotron olmoe olmo2 phimoe llama4
            "model.layers.{bid}.self_attn.linear_attn",  # deci
            "layers.{bid}.attention.wo",  # llama-pth
            "encoder.layer.{bid}.attention.output.dense",  # bert
            "transformer.layer.{bid}.attention.out_lin",  # distillbert
            "transformer.h.{bid}.attn.out_proj",  # gpt-j
            "language_model.encoder.layers.{bid}.self_attention.dense",  # persimmon
            "model.layers.{bid}.self_attn.dense",  # persimmon
            "h.{bid}.attn.c_proj",  # gpt2
            "transformer.h.{bid}.mixer.out_proj",  # phi2
            "model.layers.layers.{bid}.self_attn.o_proj",  # plamo
            "model.layers.{bid}.attention.wo",  # internlm2
            "encoder.layers.{bid}.attn.out_proj",  # nomic-bert
            "encoder.layers.{bid}.mixer.out_proj",  # jina
            "transformer.decoder_layer.{bid}.multi_head_attention.linear",  # Grok
            "transformer.blocks.{bid}.norm_attn_norm.attn.out_proj",  # dbrx
            "encoder.layers.{bid}.self_attention.dense",  # chatglm
            "transformer.layers.{bid}.attn.out_proj",  # openelm
            "transformer.h.{bid}.attn.attention.out_proj",  # exaone
            "transformer_encoder.{bid}.wo",  # neobert
        ),
        # Attention output norm
        MODEL_TENSOR.ATTN_OUT_NORM: (
            "encoder.layer.{bid}.attention.output.LayerNorm",  # bert
            "transformer.layer.{bid}.sa_layer_norm",  # distillbert
            "encoder.layers.{bid}.norm1",  # nomic-bert
            "transformer.decoder_layer.{bid}.rms_norm_1",  # Grok
            "transformer.blocks.{bid}.norm_attn_norm.norm_2",  # dbrx
        ),
        MODEL_TENSOR.ATTN_POST_NORM: (
            "model.layers.{bid}.post_attention_layernorm",  # gemma2 olmo2
            "model.layers.{bid}.post_self_attn_layernorm",  # glm-4-0414
        ),
        # Rotary embeddings
        MODEL_TENSOR.ATTN_ROT_EMBD: (
            "model.layers.{bid}.self_attn.rotary_emb.inv_freq",  # llama-hf
            "layers.{bid}.attention.inner_attention.rope.freqs",  # llama-pth
            "model.layers.layers.{bid}.self_attn.rotary_emb.inv_freq",  # plamo
            "transformer.h.{bid}.attn.rotary_emb.inv_freq",  # codeshell
        ),
        # Feed-forward norm
        MODEL_TENSOR.FFN_NORM: (
            "gpt_neox.layers.{bid}.post_attention_layernorm",  # gptneox
            "transformer.h.{bid}.ln_2",  # gpt2 refact qwen jais exaone
            "h.{bid}.post_attention_layernorm",  # bloom
            "transformer.blocks.{bid}.norm_2",  # mpt
            "model.layers.{bid}.post_attention_layernorm",  # llama-hf nemotron olmoe phimoe llama4
            "layers.{bid}.ffn_norm",  # llama-pth
            "language_model.encoder.layers.{bid}.post_attention_layernorm",  # persimmon
            "model.layers.{bid}.ln2",  # yi
            "h.{bid}.ln_2",  # gpt2
            "model.layers.{bid}.ffn_norm",  # internlm2
            "transformer.decoder_layer.{bid}.rms_norm_2",  # Grok
            "encoder.layers.{bid}.post_attention_layernorm",  # chatglm
            "transformer.layers.{bid}.ffn_norm",  # openelm
            "transformer_encoder.{bid}.ffn_norm",  # neobert
        ),
        # Pre feed-forward norm
        MODEL_TENSOR.FFN_PRE_NORM: (
            "model.layers.{bid}.pre_feedforward_layernorm",  # gemma2
            "model.layers.{bid}.pre_ff_layernorm.weight",
        ),
        # Post feed-forward norm
        MODEL_TENSOR.FFN_POST_NORM: (
            "model.layers.{bid}.post_feedforward_layernorm",  # gemma2 olmo2
            "model.layers.{bid}.post_mlp_layernorm",  # glm-4-0414
            "model.layers.{bid}.feed_forward.up_proj",
        ),
        MODEL_TENSOR.FFN_GATE_INP: (
            "layers.{bid}.feed_forward.gate",  # mixtral
            "model.layers.{bid}.block_sparse_moe.gate",  # mixtral phimoe
            "model.layers.{bid}.mlp.gate",  # qwen2moe olmoe
            "transformer.decoder_layer.{bid}.router",  # Grok
            "transformer.blocks.{bid}.ffn.router.layer",  # dbrx
            "model.layers.{bid}.block_sparse_moe.router.layer",  # granitemoe
            "model.layers.{bid}.feed_forward.router",  # llama4
            "encoder.layers.{bid}.mlp.router.layer",  # nomic-bert-moe
            "model.layers.{bid}.mlp.gate.wg",  # hunyuan
        ),
        MODEL_TENSOR.FFN_GATE_INP_SHEXP: (
            "model.layers.{bid}.mlp.shared_expert_gate",  # qwen2moe
        ),
        MODEL_TENSOR.FFN_EXP_PROBS_B: (
            "model.layers.{bid}.mlp.gate.e_score_correction",  # deepseek-v3 dots1
        ),
        # Feed-forward up
        MODEL_TENSOR.FFN_UP: (
            "gpt_neox.layers.{bid}.mlp.dense_h_to_4h",  # gptneox
            "transformer.h.{bid}.mlp.c_fc",  # gpt2 jais
            "transformer.blocks.{bid}.ffn.up_proj",  # mpt
            "transformer.h.{bid}.mlp.dense_h_to_4h",  # falcon
            "h.{bid}.mlp.dense_h_to_4h",  # bloom
            "model.layers.{bid}.mlp.up_proj",  # llama-hf refact nemotron olmo2
            "layers.{bid}.feed_forward.w3",  # llama-pth
            "encoder.layer.{bid}.intermediate.dense",  # bert
            "transformer.layer.{bid}.ffn.lin1",  # distillbert
            "transformer.h.{bid}.mlp.fc_in",  # gpt-j
            "transformer.h.{bid}.mlp.linear_3",  # refact
            "language_model.encoder.layers.{bid}.mlp.dense_h_to_4h",  # persimmon
            "model.layers.{bid}.mlp.dense_h_to_4h",  # persimmon
            "transformer.h.{bid}.mlp.w1",  # qwen
            "h.{bid}.mlp.c_fc",  # gpt2
            "transformer.h.{bid}.mlp.fc1",  # phi2
            "model.layers.{bid}.mlp.fc1",  # phi2
            "model.layers.{bid}.mlp.gate_up_proj",  # phi3 glm-4-0414
            "model.layers.layers.{bid}.mlp.up_proj",  # plamo
            "model.layers.{bid}.feed_forward.w3",  # internlm2
            "encoder.layers.{bid}.mlp.fc11",  # nomic-bert
            "encoder.layers.{bid}.mlp.fc1",  # nomic-bert-moe
            "model.layers.{bid}.mlp.c_fc",  # starcoder2
            "encoder.layer.{bid}.mlp.gated_layers_v",  # jina-bert-v2 (split up/gate, no longer used)
            "encoder.layer.{bid}.mlp.gated_layers",  # jina-bert-v2 (GEGLU)
            "encoder.layer.{bid}.mlp.up_gated_layer",  # jina-v2-code (GEGLU)
            "model.layers.{bid}.residual_mlp.w3",  # arctic
            "encoder.layers.{bid}.mlp.dense_h_to_4h",  # chatglm
            "transformer.h.{bid}.mlp.c_fc_1",  # exaone
            "model.layers.{bid}.feed_forward.up_proj",  # llama4
            "transformer_encoder.{bid}.ffn.w12",  # neobert
        ),
        MODEL_TENSOR.FFN_UP_EXP: (
            "layers.{bid}.feed_forward.experts.w3",  # mixtral (merged)
            "transformer.decoder_layer.{bid}.moe.linear_v",  # Grok (merged)
            "transformer.blocks.{bid}.ffn.experts.mlp.v1",  # dbrx
            "model.layers.{bid}.mlp.experts.up_proj",  # qwen2moe olmoe (merged)
            "model.layers.{bid}.block_sparse_moe.experts.w3",  # phimoe (merged)
            "model.layers.{bid}.feed_forward.experts.up_proj",  # llama4
            "encoder.layers.{bid}.mlp.experts.mlp.w1",  # nomic-bert-moe
        ),
        MODEL_TENSOR.FFN_UP_SHEXP: (
            "model.layers.{bid}.mlp.shared_expert.up_proj",  # qwen2moe
            "model.layers.{bid}.mlp.shared_experts.up_proj",  # deepseek deepseek2
            "model.layers.{bid}.feed_forward.shared_expert.up_proj",  # llama4
            "model.layers.{bid}.feed_forward.down_proj",
            "model.layers.{bid}.mlp.shared_mlp.up_proj",  # hunyuan
        ),
        # AWQ-activation gate
        MODEL_TENSOR.FFN_ACT: (
            "transformer.blocks.{bid}.ffn.act",  # mpt
        ),
        # Feed-forward gate
        MODEL_TENSOR.FFN_GATE: (
            "model.layers.{bid}.mlp.gate_proj",  # llama-hf refact olmo2
            "layers.{bid}.feed_forward.w1",  # llama-pth
            "transformer.h.{bid}.mlp.w2",  # qwen
            "transformer.h.{bid}.mlp.c_fc2",  # jais
            "model.layers.layers.{bid}.mlp.gate_proj",  # plamo
            "model.layers.{bid}.feed_forward.w1",  # internlm2
            "encoder.layers.{bid}.mlp.fc12",  # nomic-bert
            "encoder.layer.{bid}.mlp.gated_layers_w",  # jina-bert-v2 (split up/gate, no longer used)
            "transformer.h.{bid}.mlp.linear_1",  # refact
            "model.layers.{bid}.residual_mlp.w1",  # arctic
            "transformer.h.{bid}.mlp.c_fc_0",  # exaone
            "model.layers.{bid}.feed_forward.gate_proj",  # llama4
        ),
        MODEL_TENSOR.FFN_GATE_EXP: (
            "layers.{bid}.feed_forward.experts.w1",  # mixtral (merged)
            "transformer.decoder_layer.{bid}.moe.linear",  # Grok (merged)
            "transformer.blocks.{bid}.ffn.experts.mlp.w1",  # dbrx
            "model.layers.{bid}.mlp.experts.gate_proj",  # qwen2moe olmoe (merged)
            "model.layers.{bid}.block_sparse_moe.experts.w1",  # phimoe (merged)
            "model.layers.{bid}.feed_forward.experts.gate_proj",  # llama4
        ),
        MODEL_TENSOR.FFN_GATE_SHEXP: (
            "model.layers.{bid}.mlp.shared_expert.gate_proj",  # qwen2moe
            "model.layers.{bid}.mlp.shared_experts.gate_proj",  # deepseek deepseek2
            "model.layers.{bid}.feed_forward.shared_expert.gate_proj",  # llama4
            "model.layers.{bid}.mlp.shared_mlp.gate_proj",  # hunyuan
        ),
        # Feed-forward down
        MODEL_TENSOR.FFN_DOWN: (
            "gpt_neox.layers.{bid}.mlp.dense_4h_to_h",  # gptneox
            "transformer.h.{bid}.mlp.c_proj",  # gpt2 refact qwen jais
            "transformer.blocks.{bid}.ffn.down_proj",  # mpt
            "transformer.h.{bid}.mlp.dense_4h_to_h",  # falcon
            "h.{bid}.mlp.dense_4h_to_h",  # bloom
            "model.layers.{bid}.mlp.down_proj",  # llama-hf nemotron olmo2
            "layers.{bid}.feed_forward.w2",  # llama-pth
            "encoder.layer.{bid}.output.dense",  # bert
            "transformer.layer.{bid}.ffn.lin2",  # distillbert
            "transformer.h.{bid}.mlp.fc_out",  # gpt-j
            "language_model.encoder.layers.{bid}.mlp.dense_4h_to_h",  # persimmon
            "model.layers.{bid}.mlp.dense_4h_to_h",  # persimmon
            "h.{bid}.mlp.c_proj",  # gpt2
            "transformer.h.{bid}.mlp.fc2",  # phi2
            "model.layers.{bid}.mlp.fc2",  # phi2
            "model.layers.layers.{bid}.mlp.down_proj",  # plamo
            "model.layers.{bid}.feed_forward.w2",  # internlm2
            "encoder.layers.{bid}.mlp.fc2",  # nomic-bert
            "model.layers.{bid}.mlp.c_proj",  # starcoder2
            "encoder.layer.{bid}.mlp.wo",  # jina-bert-v2
            "transformer.layers.{bid}.ffn.proj_2",  # openelm
            "model.layers.{bid}.residual_mlp.w2",  # arctic
            "encoder.layer.{bid}.mlp.down_layer",  # jina-bert-v2
            "encoder.layers.{bid}.mlp.dense_4h_to_h",  # chatglm
            "model.layers.h.{bid}.mlp.c_proj",  # exaone
            "model.layers.{bid}.feed_forward.down_proj",  # llama4
            "transformer_encoder.{bid}.ffn.w3",  # neobert
        ),
        MODEL_TENSOR.FFN_DOWN_EXP: (
            "layers.{bid}.feed_forward.experts.w2",  # mixtral (merged)
            "transformer.decoder_layer.{bid}.moe.linear_1",  # Grok (merged)
            "transformer.blocks.{bid}.ffn.experts.mlp.w2",  # dbrx
            "model.layers.{bid}.mlp.experts.down_proj",  # qwen2moe olmoe (merged)
            "model.layers.{bid}.block_sparse_moe.output_linear",  # granitemoe
            "model.layers.{bid}.block_sparse_moe.experts.w2",  # phimoe (merged)
            "model.layers.{bid}.feed_forward.experts.down_proj",  # llama4
            "encoder.layers.{bid}.mlp.experts.mlp.w2",  # nomic-bert-moe
        ),
        MODEL_TENSOR.FFN_DOWN_SHEXP: (
            "model.layers.{bid}.mlp.shared_expert.down_proj",  # qwen2moe
            "model.layers.{bid}.mlp.shared_experts.down_proj",  # deepseek deepseek2
            "model.layers.{bid}.feed_forward.shared_expert.down_proj",  # llama4
            "model.layers.{bid}.shared_mlp.output_linear",  # granitemoe
            "model.layers.{bid}.mlp.shared_mlp.down_proj",  # hunyuan
        ),
        MODEL_TENSOR.ATTN_Q_NORM: (
            "language_model.encoder.layers.{bid}.self_attention.q_layernorm",
            "model.layers.{bid}.self_attn.q_layernorm",  # persimmon
            "model.layers.{bid}.self_attn.query_layernorm",  # hunyuan
            "model.layers.{bid}.self_attn.q_norm",  # cohere olmoe chameleon olmo2
            "transformer.blocks.{bid}.attn.q_ln",  # sea-lion
            "encoder.layer.{bid}.attention.self.layer_norm_q",  # jina-bert-v2
            "transformer.layers.{bid}.attn.q_norm",  # openelm
        ),
        MODEL_TENSOR.ATTN_K_NORM: (
            "language_model.encoder.layers.{bid}.self_attention.k_layernorm",
            "model.layers.{bid}.self_attn.k_layernorm",  # persimmon
            "model.layers.{bid}.self_attn.key_layernorm",  # hunyuan
            "model.layers.{bid}.self_attn.k_norm",  # cohere olmoe chameleon olmo2
            "transformer.blocks.{bid}.attn.k_ln",  # sea-lion
            "encoder.layer.{bid}.attention.self.layer_norm_k",  # jina-bert-v2
            "transformer.layers.{bid}.attn.k_norm",  # openelm
        ),
        MODEL_TENSOR.ROPE_FREQS: (
            "language_model.encoder.layers.{bid}.self_attention.rotary_emb.inv_freq",  # persimmon
        ),
        MODEL_TENSOR.LAYER_OUT_NORM: (
            "encoder.layer.{bid}.output.LayerNorm",  # bert
            "transformer.layer.{bid}.output_layer_norm",  # distillbert
            "encoder.layers.{bid}.norm2",  # nomic-bert
            "transformer.decoder_layer.{bid}.rms_norm_3",  # Grok
            "encoder.layer.{bid}.mlp.layernorm",  # jina-bert-v2
            "encoder.layer.{bid}.layer_norm_2",  # jina-v2-code
        ),
        MODEL_TENSOR.PER_LAYER_TOKEN_EMBD: (
            "model.embed_tokens_per_layer",  # gemma3n
        ),
        MODEL_TENSOR.PER_LAYER_MODEL_PROJ: (
            "model.per_layer_model_projection",  # gemma3n
        ),
        MODEL_TENSOR.PER_LAYER_PROJ_NORM: (
            "model.per_layer_projection_norm",  # gemma3n
        ),
        MODEL_TENSOR.ALTUP_PROJ: (
            "model.altup_projections",  # gemma3n
        ),
        MODEL_TENSOR.ALTUP_UNEMBD_PROJ: (
            "model.altup_unembed_projections",  # gemma3n
        ),
        MODEL_TENSOR.PER_LAYER_INP_GATE: (
            "model.layers.{bid}.per_layer_input_gate",  # gemma3n
        ),
        MODEL_TENSOR.PER_LAYER_PROJ: (
            "model.layers.{bid}.per_layer_projection",  # gemma3n
        ),
        MODEL_TENSOR.PER_LAYER_POST_NORM: (
            "model.layers.{bid}.post_per_layer_input_norm",  # gemma3n
        ),
        MODEL_TENSOR.ALTUP_CORRECT_COEF: (
            "model.layers.{bid}.altup.correction_coefs",  # gemma3n
        ),
        MODEL_TENSOR.ALTUP_CORRECT_SCALE: (
            "model.layers.{bid}.altup.correct_output_scale",  # gemma3n
        ),
        MODEL_TENSOR.ALTUP_PREDICT_COEF: (
            "model.layers.{bid}.altup.prediction_coefs",  # gemma3n
        ),
        MODEL_TENSOR.ALTUP_ROUTER: (
            "model.layers.{bid}.altup.modality_router",  # gemma3n
        ),
        MODEL_TENSOR.ALTUP_ROUTER_NORM: (
            "model.layers.{bid}.altup.router_norm",  # gemma3n
        ),
        MODEL_TENSOR.LAUREL_L: (
            "model.layers.{bid}.laurel.linear_left",  # gemma3n
        ),
        MODEL_TENSOR.LAUREL_R: (
            "model.layers.{bid}.laurel.linear_right",  # gemma3n
        ),
        MODEL_TENSOR.LAUREL_POST_NORM: (
            "model.layers.{bid}.laurel.post_laurel_norm",  # gemma3n
        ),
        MODEL_TENSOR.SSM_IN: (
            "model.layers.{bid}.in_proj",
            "backbone.layers.{bid}.mixer.in_proj",
            "model.layers.{bid}.mamba.in_proj",
        ),
        MODEL_TENSOR.SSM_CONV1D: (
            "model.layers.{bid}.conv1d",
            "backbone.layers.{bid}.mixer.conv1d",
            "model.layers.{bid}.mamba.conv1d",
        ),
        MODEL_TENSOR.SSM_X: (
            "model.layers.{bid}.x_proj",
            "backbone.layers.{bid}.mixer.x_proj",
        ),
        MODEL_TENSOR.SSM_DT: (
            "model.layers.{bid}.dt_proj",
            "backbone.layers.{bid}.mixer.dt_proj",
            "model.layers.{bid}.mamba.dt_proj",
        ),
        MODEL_TENSOR.SSM_A: (
            "model.layers.{bid}.A_log",
            "backbone.layers.{bid}.mixer.A_log",
            "model.layers.{bid}.mamba.A_log",
        ),
        MODEL_TENSOR.SSM_D: (
            "model.layers.{bid}.D",
            "backbone.layers.{bid}.mixer.D",
            "model.layers.{bid}.mamba.D",
        ),
        MODEL_TENSOR.SSM_NORM: (
            "model.layers.{bid}.mamba.norm",  # falcon-h1
            "backbone.layers.{bid}.mixer.norm",  # mamba2
        ),
        MODEL_TENSOR.SSM_OUT: (
            "model.layers.{bid}.out_proj",
            "backbone.layers.{bid}.mixer.out_proj",
            "model.layers.{bid}.mamba.out_proj",  # falcon-h1
        ),
        MODEL_TENSOR.TIME_MIX_W0: (
            "model.layers.{bid}.attention.w0",  # rwkv7
        ),
        MODEL_TENSOR.TIME_MIX_W1: (
            "rwkv.blocks.{bid}.attention.time_maa_w1",  # rwkv6
            "model.layers.{bid}.self_attn.time_maa_w1",  # rwkv6qwen2
            "model.layers.{bid}.attention.w1",  # rwkv7
        ),
        MODEL_TENSOR.TIME_MIX_W2: (
            "rwkv.blocks.{bid}.attention.time_maa_w2",  # rwkv6
            "model.layers.{bid}.self_attn.time_maa_w2",  # rwkv6qwen2
            "model.layers.{bid}.attention.w2",  # rwkv7
        ),
        MODEL_TENSOR.TIME_MIX_A0: (
            "model.layers.{bid}.attention.a0",  # rwkv7
        ),
        MODEL_TENSOR.TIME_MIX_A1: (
            "model.layers.{bid}.attention.a1",  # rwkv7
        ),
        MODEL_TENSOR.TIME_MIX_A2: (
            "model.layers.{bid}.attention.a2",  # rwkv7
        ),
        MODEL_TENSOR.TIME_MIX_V0: (
            "model.layers.{bid}.attention.v0",  # rwkv7
        ),
        MODEL_TENSOR.TIME_MIX_V1: (
            "model.layers.{bid}.attention.v1",  # rwkv7
        ),
        MODEL_TENSOR.TIME_MIX_V2: (
            "model.layers.{bid}.attention.v2",  # rwkv7
        ),
        MODEL_TENSOR.TIME_MIX_G1: (
            "model.layers.{bid}.attention.g1",  # rwkv7
        ),
        MODEL_TENSOR.TIME_MIX_G2: (
            "model.layers.{bid}.attention.g2",  # rwkv7
        ),
        MODEL_TENSOR.TIME_MIX_K_K: (
            "model.layers.{bid}.attention.k_k",  # rwkv7
        ),
        MODEL_TENSOR.TIME_MIX_K_A: (
            "model.layers.{bid}.attention.k_a",  # rwkv7
        ),
        MODEL_TENSOR.TIME_MIX_R_K: (
            "model.layers.{bid}.attention.r_k",  # rwkv7
        ),
        MODEL_TENSOR.TIME_MIX_LERP_X: (
            "rwkv.blocks.{bid}.attention.time_maa_x",  # rwkv6
            "model.layers.{bid}.self_attn.time_maa_x",  # rwkv6qwen2
        ),
        MODEL_TENSOR.TIME_MIX_LERP_K: (
            "rwkv.blocks.{bid}.attention.time_maa_k",  # rwkv6
            "model.layers.{bid}.self_attn.time_maa_k",  # rwkv6qwen2
        ),
        MODEL_TENSOR.TIME_MIX_LERP_V: (
            "rwkv.blocks.{bid}.attention.time_maa_v",  # rwkv6
            "model.layers.{bid}.self_attn.time_maa_v",  # rwkv6qwen2
        ),
        MODEL_TENSOR.TIME_MIX_LERP_R: (
            "rwkv.blocks.{bid}.attention.time_maa_r",  # rwkv6
            "model.layers.{bid}.self_attn.time_maa_r",  # rwkv6qwen2
        ),
        MODEL_TENSOR.TIME_MIX_LERP_G: (
            "rwkv.blocks.{bid}.attention.time_maa_g",  # rwkv6
            "model.layers.{bid}.self_attn.time_maa_g",  # rwkv6qwen2
        ),
        MODEL_TENSOR.TIME_MIX_LERP_W: (
            "rwkv.blocks.{bid}.attention.time_maa_w",  # rwkv6
            "model.layers.{bid}.self_attn.time_maa_w",  # rwkv6qwen2
        ),
        MODEL_TENSOR.TIME_MIX_FIRST: (
            "rwkv.blocks.{bid}.attention.time_faaaa",  # rwkv6
        ),
        MODEL_TENSOR.TIME_MIX_DECAY: (
            "rwkv.blocks.{bid}.attention.time_decay",  # rwkv6
            "model.layers.{bid}.self_attn.time_decay",  # rwkv6qwen2
        ),
        MODEL_TENSOR.TIME_MIX_DECAY_W1: (
            "rwkv.blocks.{bid}.attention.time_decay_w1",  # rwkv6
            "model.layers.{bid}.self_attn.time_decay_w1",  # rwkv6qwen2
        ),
        MODEL_TENSOR.TIME_MIX_DECAY_W2: (
            "rwkv.blocks.{bid}.attention.time_decay_w2",  # rwkv6
            "model.layers.{bid}.self_attn.time_decay_w2",  # rwkv6qwen2
        ),
        MODEL_TENSOR.TIME_MIX_KEY: (
            "rwkv.blocks.{bid}.attention.key",  # rwkv6
            "model.layers.{bid}.self_attn.k_proj",  # rwkv6qwen2
            "model.layers.{bid}.attention.key",  # rwkv7
            "model.layers.{bid}.attention.k_proj",  # rwkv7
        ),
        MODEL_TENSOR.TIME_MIX_VALUE: (
            "rwkv.blocks.{bid}.attention.value",  # rwkv6
            "model.layers.{bid}.self_attn.v_proj",  # rwkv6qwen2
            "model.layers.{bid}.attention.value",  # rwkv7
            "model.layers.{bid}.attention.v_proj",  # rwkv7
        ),
        MODEL_TENSOR.TIME_MIX_RECEPTANCE: (
            "rwkv.blocks.{bid}.attention.receptance",  # rwkv6
            "model.layers.{bid}.self_attn.q_proj",  # rwkv6qwen2
            "model.layers.{bid}.attention.receptance",  # rwkv7
            "model.layers.{bid}.attention.r_proj",  # rwkv7
        ),
        MODEL_TENSOR.TIME_MIX_GATE: (
            "rwkv.blocks.{bid}.attention.gate",  # rwkv6
            "model.layers.{bid}.self_attn.gate",  # rwkv6qwen2
        ),
        MODEL_TENSOR.TIME_MIX_LN: (
            "rwkv.blocks.{bid}.attention.ln_x",  # rwkv6
            "model.layers.{bid}.attention.ln_x",  # rwkv7
        ),
        MODEL_TENSOR.TIME_MIX_OUTPUT: (
            "rwkv.blocks.{bid}.attention.output",  # rwkv6
            "model.layers.{bid}.self_attn.o_proj",  # rwkv6qwen2
            "model.layers.{bid}.attention.output",  # rwkv7
            "model.layers.{bid}.attention.o_proj",  # rwkv7
        ),
        MODEL_TENSOR.CHANNEL_MIX_LERP_K: (
            "rwkv.blocks.{bid}.feed_forward.time_maa_k",  # rwkv6
            "model.layers.{bid}.feed_forward.x_k",  # rwkv7
        ),
        MODEL_TENSOR.CHANNEL_MIX_LERP_R: (
            "rwkv.blocks.{bid}.feed_forward.time_maa_r",  # rwkv6
        ),
        MODEL_TENSOR.CHANNEL_MIX_KEY: (
            "rwkv.blocks.{bid}.feed_forward.key",  # rwkv6
            "model.layers.{bid}.feed_forward.key",  # rwkv7
        ),
        MODEL_TENSOR.CHANNEL_MIX_RECEPTANCE: (
            "rwkv.blocks.{bid}.feed_forward.receptance",  # rwkv6
        ),
        MODEL_TENSOR.CHANNEL_MIX_VALUE: (
            "rwkv.blocks.{bid}.feed_forward.value",  # rwkv6
            "model.layers.{bid}.feed_forward.value",  # rwkv7
        ),
        MODEL_TENSOR.ATTN_Q_A: (
            "model.layers.{bid}.self_attn.q_a_proj",  # deepseek2
        ),
        MODEL_TENSOR.ATTN_Q_B: (
            "model.layers.{bid}.self_attn.q_b_proj",  # deepseek2
        ),
        MODEL_TENSOR.ATTN_KV_A_MQA: (
            "model.layers.{bid}.self_attn.kv_a_proj_with_mqa",  # deepseek2
        ),
        MODEL_TENSOR.ATTN_KV_B: (
            "model.layers.{bid}.self_attn.kv_b_proj",  # deepseek2
        ),
        MODEL_TENSOR.ATTN_K_B: (
            "model.layers.{bid}.self_attn.k_b_proj",  # deepseek2
        ),
        MODEL_TENSOR.ATTN_V_B: (
            "model.layers.{bid}.self_attn.v_b_proj",  # deepseek2
        ),
        MODEL_TENSOR.ATTN_Q_A_NORM: (
            "model.layers.{bid}.self_attn.q_a_layernorm",  # deepseek2
        ),
        MODEL_TENSOR.ATTN_KV_A_NORM: (
            "model.layers.{bid}.self_attn.kv_a_layernorm",  # deepseek2
        ),
        MODEL_TENSOR.ATTN_SUB_NORM: (
            "model.layers.{bid}.self_attn.inner_attn_ln",  # bitnet
        ),
        MODEL_TENSOR.FFN_SUB_NORM: (
            "model.layers.{bid}.mlp.ffn_layernorm",  # bitnet
        ),
        MODEL_TENSOR.DEC_ATTN_NORM: (
            "decoder.block.{bid}.layer.0.layer_norm",  # t5
        ),
        MODEL_TENSOR.DEC_ATTN_Q: (
            "decoder.block.{bid}.layer.0.SelfAttention.q",  # t5
        ),
        MODEL_TENSOR.DEC_ATTN_K: (
            "decoder.block.{bid}.layer.0.SelfAttention.k",  # t5
        ),
        MODEL_TENSOR.DEC_ATTN_V: (
            "decoder.block.{bid}.layer.0.SelfAttention.v",  # t5
        ),
        MODEL_TENSOR.DEC_ATTN_OUT: (
            "decoder.block.{bid}.layer.0.SelfAttention.o",  # t5
        ),
        MODEL_TENSOR.DEC_ATTN_REL_B: (
            "decoder.block.{bid}.layer.0.SelfAttention.relative_attention_bias",  # t5
        ),
        MODEL_TENSOR.DEC_CROSS_ATTN_NORM: (
            "decoder.block.{bid}.layer.1.layer_norm",  # t5
        ),
        MODEL_TENSOR.DEC_CROSS_ATTN_Q: (
            "decoder.block.{bid}.layer.1.EncDecAttention.q",  # t5
        ),
        MODEL_TENSOR.DEC_CROSS_ATTN_K: (
            "decoder.block.{bid}.layer.1.EncDecAttention.k",  # t5
        ),
        MODEL_TENSOR.DEC_CROSS_ATTN_V: (
            "decoder.block.{bid}.layer.1.EncDecAttention.v",  # t5
        ),
        MODEL_TENSOR.DEC_CROSS_ATTN_OUT: (
            "decoder.block.{bid}.layer.1.EncDecAttention.o",  # t5
        ),
        MODEL_TENSOR.DEC_CROSS_ATTN_REL_B: (
            "decoder.block.{bid}.layer.1.EncDecAttention.relative_attention_bias",  # t5
        ),
        MODEL_TENSOR.DEC_FFN_NORM: (
            "decoder.block.{bid}.layer.2.layer_norm",  # t5
        ),
        MODEL_TENSOR.DEC_FFN_GATE: (
            "decoder.block.{bid}.layer.2.DenseReluDense.wi_0",  # flan-t5
        ),
        MODEL_TENSOR.DEC_FFN_UP: (
            "decoder.block.{bid}.layer.2.DenseReluDense.wi",  # t5
            "decoder.block.{bid}.layer.2.DenseReluDense.wi_1",  # flan-t5
        ),
        MODEL_TENSOR.DEC_FFN_DOWN: (
            "decoder.block.{bid}.layer.2.DenseReluDense.wo",  # t5
        ),
        MODEL_TENSOR.DEC_OUTPUT_NORM: (
            "decoder.final_layer_norm",  # t5
        ),
        MODEL_TENSOR.ENC_ATTN_NORM: (
            "encoder.block.{bid}.layer.0.layer_norm",  # t5
        ),
        MODEL_TENSOR.ENC_ATTN_Q: (
            "encoder.block.{bid}.layer.0.SelfAttention.q",  # t5
        ),
        MODEL_TENSOR.ENC_ATTN_K: (
            "encoder.block.{bid}.layer.0.SelfAttention.k",  # t5
        ),
        MODEL_TENSOR.ENC_ATTN_V: (
            "encoder.block.{bid}.layer.0.SelfAttention.v",  # t5
        ),
        MODEL_TENSOR.ENC_ATTN_OUT: (
            "encoder.block.{bid}.layer.0.SelfAttention.o",  # t5
        ),
        MODEL_TENSOR.ENC_ATTN_REL_B: (
            "encoder.block.{bid}.layer.0.SelfAttention.relative_attention_bias",  # t5
        ),
        MODEL_TENSOR.ENC_FFN_NORM: (
            "encoder.block.{bid}.layer.1.layer_norm",  # t5
        ),
        MODEL_TENSOR.ENC_FFN_GATE: (
            "encoder.block.{bid}.layer.1.DenseReluDense.wi_0",  # flan-t5
        ),
        MODEL_TENSOR.ENC_FFN_UP: (
            "encoder.block.{bid}.layer.1.DenseReluDense.wi",  # t5
            "encoder.block.{bid}.layer.1.DenseReluDense.wi_1",  # flan-t5
        ),
        MODEL_TENSOR.ENC_FFN_DOWN: (
            "encoder.block.{bid}.layer.1.DenseReluDense.wo",  # t5
        ),
        ############################################################################
        # TODO: these do not belong to block_mappings_cfg - move them to mappings_cfg
        MODEL_TENSOR.ENC_OUTPUT_NORM: (
            "encoder.final_layer_norm",  # t5
            "layer_norm",  # neobert
        ),
        MODEL_TENSOR.CLS: (
            "classifier",  # jina
            "classifier.dense",  # roberta
            "pre_classifier",  # distillbert
            "dense",  # neobert
        ),
        MODEL_TENSOR.CLS_OUT: (
            "classifier.out_proj",  # roberta
        ),
        #############################################################################
        MODEL_TENSOR.CONVNEXT_DW: (
            "backbone.convnext.{bid}.dwconv",  # wavtokenizer
        ),
        MODEL_TENSOR.CONVNEXT_NORM: (
            "backbone.convnext.{bid}.norm",  # wavtokenizer
        ),
        MODEL_TENSOR.CONVNEXT_PW1: (
            "backbone.convnext.{bid}.pwconv1",  # wavtokenizer
        ),
        MODEL_TENSOR.CONVNEXT_PW2: (
            "backbone.convnext.{bid}.pwconv2",  # wavtokenizer
        ),
        MODEL_TENSOR.CONVNEXT_GAMMA: (
            "backbone.convnext.{bid}.gamma",  # wavtokenizer
        ),
        MODEL_TENSOR.POSNET_CONV1: (
            "backbone.posnet.{bid}.conv1",  # wavtokenizer
        ),
        MODEL_TENSOR.POSNET_CONV2: (
            "backbone.posnet.{bid}.conv2",  # wavtokenizer
        ),
        MODEL_TENSOR.POSNET_NORM: (
            "backbone.posnet.{bid}.norm",  # wavtokenizer
        ),
        MODEL_TENSOR.POSNET_NORM1: (
            "backbone.posnet.{bid}.norm1",  # wavtokenizer
        ),
        MODEL_TENSOR.POSNET_NORM2: (
            "backbone.posnet.{bid}.norm2",  # wavtokenizer
        ),
        MODEL_TENSOR.POSNET_ATTN_NORM: (
            "backbone.posnet.{bid}.norm",  # wavtokenizer
        ),
        MODEL_TENSOR.POSNET_ATTN_Q: (
            "backbone.posnet.{bid}.q",  # wavtokenizer
        ),
        MODEL_TENSOR.POSNET_ATTN_K: (
            "backbone.posnet.{bid}.k",  # wavtokenizer
        ),
        MODEL_TENSOR.POSNET_ATTN_V: (
            "backbone.posnet.{bid}.v",  # wavtokenizer
        ),
        MODEL_TENSOR.POSNET_ATTN_OUT: (
            "backbone.posnet.{bid}.proj_out",  # wavtokenizer
        ),
        #############################################################################
        ## Vision encoder
        MODEL_TENSOR.V_MMPROJ: (
            "multi_modal_projector.linear_{bid}",
            "visual.merger.mlp.{bid}",  # qwen2vl
        ),
        MODEL_TENSOR.V_MMPROJ_FC: (
            "model.connector.modality_projection.proj",  # SmolVLM
        ),
        MODEL_TENSOR.V_MMPROJ_MLP: (
            "model.mm_projector.mlp.mlp.{bid}",
            "vision_model.vision_adapter.mlp.fc{bid}",  # llama 4
            "mlp1.{bid}",  # InternVL
        ),
        MODEL_TENSOR.V_MMPROJ_PEG: (
            "model.mm_projector.peg.peg.{bid}",
        ),
        MODEL_TENSOR.V_ENC_EMBD_CLS: (
            "vision_tower.vision_model.embeddings.class_embedding",
            "vision_model.class_embedding",  # llama 4
        ),
        MODEL_TENSOR.V_ENC_EMBD_PATCH: (
            "vision_tower.vision_model.embeddings.patch_embedding",
            "vpm.embeddings.patch_embedding",
            "model.vision_model.embeddings.patch_embedding",  # SmolVLM
            "vision_tower.patch_conv",  # pixtral
            "vision_model.patch_embedding.linear",  # llama 4
            "visual.patch_embed.proj",  # qwen2vl
        ),
        MODEL_TENSOR.V_ENC_EMBD_POS: (
            "vision_tower.vision_model.embeddings.position_embedding",
            "vpm.embeddings.position_embedding",
            "model.vision_model.embeddings.position_embedding",  # SmolVLM
            "vision_model.positional_embedding_vlm",  # llama 4
        ),
        MODEL_TENSOR.V_ENC_ATTN_Q: (
            "vision_tower.vision_model.encoder.layers.{bid}.self_attn.q_proj",
            "vpm.encoder.layers.{bid}.self_attn.q_proj",
            "model.vision_model.encoder.layers.{bid}.self_attn.q_proj",  # SmolVLM
            "vision_model.model.layers.{bid}.self_attn.q_proj",  # llama4
            "vision_tower.transformer.layers.{bid}.attention.q_proj",  # pixtral
            "visual.blocks.{bid}.attn.q",  # qwen2vl, generated
        ),
        MODEL_TENSOR.V_ENC_ATTN_Q_NORM: (
            "vision_tower.vision_model.encoder.layers.{bid}.attn.q_norm",  # InternVL
        ),
        MODEL_TENSOR.V_ENC_ATTN_K: (
            "vision_tower.vision_model.encoder.layers.{bid}.self_attn.k_proj",
            "vpm.encoder.layers.{bid}.self_attn.k_proj",
            "model.vision_model.encoder.layers.{bid}.self_attn.k_proj",  # SmolVLM
            "vision_model.model.layers.{bid}.self_attn.k_proj",  # llama4
            "vision_tower.transformer.layers.{bid}.attention.k_proj",  # pixtral
            "visual.blocks.{bid}.attn.k",  # qwen2vl, generated
        ),
        MODEL_TENSOR.V_ENC_ATTN_K_NORM: (
            "vision_tower.vision_model.encoder.layers.{bid}.attn.k_norm",  # InternVL
        ),
        MODEL_TENSOR.V_ENC_ATTN_V: (
            "vision_tower.vision_model.encoder.layers.{bid}.self_attn.v_proj",
            "vpm.encoder.layers.{bid}.self_attn.v_proj",
            "model.vision_model.encoder.layers.{bid}.self_attn.v_proj",  # SmolVLM
            "vision_model.model.layers.{bid}.self_attn.v_proj",  # llama4
            "vision_tower.transformer.layers.{bid}.attention.v_proj",  # pixtral
            "visual.blocks.{bid}.attn.v",  # qwen2vl, generated
        ),
        MODEL_TENSOR.V_ENC_INPUT_NORM: (
            "vision_tower.vision_model.encoder.layers.{bid}.layer_norm1",
            "vision_tower.vision_model.encoder.layers.{bid}.norm1",  # InternVL
            "vpm.encoder.layers.{bid}.layer_norm1",
            "model.vision_model.encoder.layers.{bid}.layer_norm1",  # SmolVLM
            "vision_tower.transformer.layers.{bid}.attention_norm",  # pixtral
            "vision_model.model.layers.{bid}.input_layernorm",  # llama4
            "visual.blocks.{bid}.norm1",  # qwen2vl
        ),
        MODEL_TENSOR.V_ENC_ATTN_O: (
            "vision_tower.vision_model.encoder.layers.{bid}.self_attn.out_proj",
            "vision_tower.vision_model.encoder.layers.{bid}.attn.proj",  # InternVL
            "vpm.encoder.layers.{bid}.self_attn.out_proj",
            "model.vision_model.encoder.layers.{bid}.self_attn.out_proj",  # SmolVLM
            "vision_model.model.layers.{bid}.self_attn.o_proj",  # llama4
            "vision_tower.transformer.layers.{bid}.attention.o_proj",  # pixtral
            "visual.blocks.{bid}.attn.proj",  # qwen2vl
        ),
        MODEL_TENSOR.V_ENC_POST_ATTN_NORM: (
            "vision_tower.vision_model.encoder.layers.{bid}.layer_norm2",
            "vision_tower.vision_model.encoder.layers.{bid}.norm2",  # InternVL
            "vpm.encoder.layers.{bid}.layer_norm2",
            "model.vision_model.encoder.layers.{bid}.layer_norm2",  # SmolVLM
            "vision_model.model.layers.{bid}.post_attention_layernorm",  # llama4
            "vision_tower.transformer.layers.{bid}.ffn_norm",  # pixtral
            "visual.blocks.{bid}.norm2",  # qwen2vl
        ),
        MODEL_TENSOR.V_ENC_FFN_UP: (
            "vision_tower.vision_model.encoder.layers.{bid}.mlp.fc1",
            "vpm.encoder.layers.{bid}.mlp.fc1",
            "model.vision_model.encoder.layers.{bid}.mlp.fc1",  # SmolVLM, gemma3
            "vision_tower.transformer.layers.{bid}.feed_forward.up_proj",  # pixtral
            "vision_model.model.layers.{bid}.mlp.fc1",  # llama4
            "visual.blocks.{bid}.mlp.fc1",  # qwen2vl
            "visual.blocks.{bid}.mlp.up_proj",  # qwen2.5vl
        ),
        MODEL_TENSOR.V_ENC_FFN_GATE: (
            "vision_tower.transformer.layers.{bid}.feed_forward.gate_proj",  # pixtral
            "visual.blocks.{bid}.mlp.gate_proj",  # qwen2.5vl
        ),
        MODEL_TENSOR.V_ENC_FFN_DOWN: (
            "vision_tower.vision_model.encoder.layers.{bid}.mlp.fc2",
            "vpm.encoder.layers.{bid}.mlp.fc2",
            "model.vision_model.encoder.layers.{bid}.mlp.fc2",  # SmolVLM, gemma3
            "vision_tower.transformer.layers.{bid}.feed_forward.down_proj",  # pixtral
            "vision_model.model.layers.{bid}.mlp.fc2",  # llama4
            "visual.blocks.{bid}.mlp.fc2",  # qwen2vl
            "visual.blocks.{bid}.mlp.down_proj",  # qwen2.5vl
        ),
        MODEL_TENSOR.V_LAYER_SCALE_1: (
            "vision_tower.vision_model.encoder.layers.{bid}.ls1",  # InternVL
        ),
        MODEL_TENSOR.V_LAYER_SCALE_2: (
            "vision_tower.vision_model.encoder.layers.{bid}.ls2",  # InternVL
        ),
        MODEL_TENSOR.V_PRE_NORM: (
            "vision_tower.vision_model.pre_layrnorm",
            "vision_tower.ln_pre",  # pixtral
            "vision_model.layernorm_pre",  # llama4
        ),
        MODEL_TENSOR.V_POST_NORM: (
            "vision_tower.vision_model.post_layernorm",
            "model.vision_model.post_layernorm",  # SmolVLM
            "vision_model.layernorm_post",  # llama4
            "visual.merger.ln_q",  # qwen2vl
        ),
        MODEL_TENSOR.V_MM_INP_PROJ: (
            "multi_modal_projector.mm_input_projection",
        ),
        MODEL_TENSOR.V_MM_INP_NORM: (
            "multi_modal_projector.norm",
        ),
        MODEL_TENSOR.V_MM_SOFT_EMB_NORM: (
            "multi_modal_projector.mm_soft_emb_norm",
        ),
        MODEL_TENSOR.V_RESMPL_POS_EMBD_K: (
            "resampler.pos_embed_k",
        ),
        MODEL_TENSOR.V_RESMPL_ATTN_Q: (
            "resampler.attn.in_proj_q",  # tensor generated from resampler.attn.in_proj
        ),
        MODEL_TENSOR.V_RESMPL_ATTN_K: (
            "resampler.attn.in_proj_k",  # tensor generated from resampler.attn.in_proj
        ),
        MODEL_TENSOR.V_RESMPL_ATTN_V: (
            "resampler.attn.in_proj_v",  # tensor generated from resampler.attn.in_proj
        ),
        MODEL_TENSOR.V_RESMPL_ATTN_OUT: (
            "resampler.attn.out_proj",
        ),
        MODEL_TENSOR.V_RESMPL_KV: (
            "resampler.kv_proj",
        ),
        MODEL_TENSOR.V_RESMPL_POST_NORM: (
            "resampler.ln_post",
        ),
        MODEL_TENSOR.V_RESMPL_KV_NORM: (
            "resampler.ln_kv",
        ),
        MODEL_TENSOR.V_RESMPL_Q_NORM: (
            "resampler.ln_q",
        ),
        MODEL_TENSOR.V_RESMPL_PROJ: (
            "resampler.proj",
        ),
        MODEL_TENSOR.V_RESMPL_QUERY: (
            "resampler.query",
        ),
        MODEL_TENSOR.V_TOK_EMBD_IMG_BREAK: (
            "v.token_embd.img_break",  # for pixtral, this is a generated vector
        ),
        MODEL_TENSOR.V_MM_PATCH_MERGER: (
            "multi_modal_projector.patch_merger.merging_layer",  # mistral small 3.1
        ),
        # audio (mtmd)
        MODEL_TENSOR.A_ENC_EMBD_POS: (
            "audio_tower.embed_positions",  # ultravox
        ),
        MODEL_TENSOR.A_ENC_CONV1D: (
            "audio_tower.conv{bid}",  # ultravox
        ),
        MODEL_TENSOR.A_PRE_NORM: (),
        MODEL_TENSOR.A_POST_NORM: (
            "audio_tower.layer_norm",  # ultravox
            "audio_tower.ln_post",  # qwen2omni
        ),
        MODEL_TENSOR.A_ENC_ATTN_Q: (
            "audio_tower.layers.{bid}.self_attn.q_proj",  # ultravox
        ),
        MODEL_TENSOR.A_ENC_ATTN_K: (
            "audio_tower.layers.{bid}.self_attn.k_proj",  # ultravox
        ),
        MODEL_TENSOR.A_ENC_ATTN_V: (
            "audio_tower.layers.{bid}.self_attn.v_proj",  # ultravox
        ),
        MODEL_TENSOR.A_ENC_INPUT_NORM: (
            "audio_tower.layers.{bid}.self_attn_layer_norm",  # ultravox
        ),
        MODEL_TENSOR.A_ENC_OUTPUT: (
            "audio_tower.layers.{bid}.self_attn.out_proj",  # ultravox
        ),
        MODEL_TENSOR.A_ENC_OUTPUT_NORM: (
            "audio_tower.layers.{bid}.final_layer_norm",  # ultravox
        ),
        MODEL_TENSOR.A_ENC_FFN_UP: (
            "audio_tower.layers.{bid}.fc1",  # ultravox
        ),
        MODEL_TENSOR.A_ENC_FFN_GATE: (),
        MODEL_TENSOR.A_ENC_FFN_DOWN: (
            "audio_tower.layers.{bid}.fc2",  # ultravox
        ),
        # note: some tensors below have an "audio." pseudo-prefix, to prevent conflicts with vision tensors
        # this prefix is added in the conversion code in modify_tensors()
        MODEL_TENSOR.A_MMPROJ: (
            "audio.multi_modal_projector.linear_{bid}",  # ultravox
        ),
        MODEL_TENSOR.A_MMPROJ_FC: (
            "audio.multi_modal_projector.linear",  # qwen2audio
            "audio_tower.proj",  # qwen2omni
        ),
        MODEL_TENSOR.A_MM_NORM_PRE: (
            "audio.multi_modal_projector.ln_pre",  # ultravox
        ),
        MODEL_TENSOR.A_MM_NORM_MID: (
            "audio.multi_modal_projector.ln_mid",  # ultravox
        ),
    }

    # architecture-specific block mappings
    arch_block_mappings_cfg: dict[MODEL_ARCH, dict[MODEL_TENSOR, tuple[str, ...]]] = {
        MODEL_ARCH.ARCTIC: {
            MODEL_TENSOR.FFN_NORM: (
                "model.layers.{bid}.residual_layernorm",
            ),
            MODEL_TENSOR.FFN_NORM_EXP: (
                "model.layers.{bid}.post_attention_layernorm",
            ),
        },
    }

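    # Resolves any known source name (or canonical GGUF name) to a
    # (tensor type, canonical name) pair; built per architecture in __init__.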
    mapping: dict[str, tuple[MODEL_TENSOR, str]]

    def __init__(self, arch: MODEL_ARCH, n_blocks: int):
        self.mapping = {}
        for tensor, keys in self.mappings_cfg.items():
            if tensor not in MODEL_TENSORS[arch]:
                continue
            tensor_name = TENSOR_NAMES[tensor]
            self.mapping[tensor_name] = (tensor, tensor_name)
            for key in keys:
                self.mapping[key] = (tensor, tensor_name)
        if arch in self.arch_block_mappings_cfg:
            self.block_mappings_cfg.update(self.arch_block_mappings_cfg[arch])
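        # Expand the block-level mappings: every "{bid}" placeholder is
        # formatted with each concrete block index so that fully-qualified
        # names can be looked up directly.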
        for bid in range(n_blocks):
            for tensor, keys in self.block_mappings_cfg.items():
                if tensor not in MODEL_TENSORS[arch]:
                    continue
                tensor_name = TENSOR_NAMES[tensor].format(bid=bid)
                self.mapping[tensor_name] = (tensor, tensor_name)
                for key in keys:
                    key = key.format(bid=bid)
                    self.mapping[key] = (tensor, tensor_name)

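    # After construction, self.mapping holds entries such as
    #   "model.layers.0.self_attn.q_proj" -> (MODEL_TENSOR.ATTN_Q, "blk.0.attn_q")
    # so both canonical GGUF names and known checkpoint names resolve to the
    # same (tensor type, canonical name) pair.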
    def get_type_and_name(self, key: str, try_suffixes: Sequence[str] = ()) -> tuple[MODEL_TENSOR, str] | None:
        result = self.mapping.get(key)
        if result is not None:
            return result
        for suffix in try_suffixes:
            if key.endswith(suffix):
                result = self.mapping.get(key[:-len(suffix)])
                if result is not None:
                    return result[0], result[1] + suffix
        return None

    def get_name(self, key: str, try_suffixes: Sequence[str] = ()) -> str | None:
        result = self.get_type_and_name(key, try_suffixes=try_suffixes)
        if result is None:
            return None
        return result[1]

    def get_type(self, key: str, try_suffixes: Sequence[str] = ()) -> MODEL_TENSOR | None:
        result = self.get_type_and_name(key, try_suffixes=try_suffixes)
        if result is None:
            return None
        return result[0]

    def __getitem__(self, key: str) -> str:
        try:
            return self.mapping[key][1]
        except KeyError:
            raise KeyError(key)

    def __contains__(self, key: str) -> bool:
        return key in self.mapping

    def __repr__(self) -> str:
        return repr(self.mapping)

def get_tensor_name_map(arch: MODEL_ARCH, n_blocks: int) -> TensorNameMap:
    return TensorNameMap(arch, n_blocks)
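

# Illustrative usage, as a minimal sketch (assumes MODEL_ARCH.LLAMA is defined
# in .constants and that TENSOR_NAMES maps ATTN_Q to "blk.{bid}.attn_q"):
#
#   tmap = get_tensor_name_map(MODEL_ARCH.LLAMA, n_blocks=32)
#   # The suffix is stripped for the lookup and re-appended to the result:
#   tmap.get_name("model.layers.0.self_attn.q_proj.weight",
#                 try_suffixes=(".weight", ".bias"))
#   # -> "blk.0.attn_q.weight"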