tensor_mapping.py

from __future__ import annotations

from typing import Sequence

from .constants import MODEL_ARCH, MODEL_TENSOR, MODEL_TENSORS, TENSOR_NAMES


class TensorNameMap:
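    """Maps tensor names from many model families onto canonical GGUF names.

    `mappings_cfg` covers model-wide tensors and `block_mappings_cfg` covers
    per-block tensors, whose "{bid}" placeholder is expanded with the block
    index at construction time. The inline comments name the architectures
    each alias comes from.
    """
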
    mappings_cfg: dict[MODEL_TENSOR, tuple[str, ...]] = {
        # Token embeddings
        MODEL_TENSOR.TOKEN_EMBD: (
            "gpt_neox.embed_in",  # gptneox
            "transformer.wte",  # gpt2 gpt-j mpt refact qwen dbrx jais exaone
            "transformer.word_embeddings",  # falcon
            "word_embeddings",  # bloom
            "model.embed_tokens",  # llama-hf nemotron olmoe olmo2 rwkv6qwen2 glm4-0414 plamo2 granite-hybrid
            "embed_tokens",  # embeddinggemma
            "tok_embeddings",  # llama-pth
            "embeddings.word_embeddings",  # bert nomic-bert
            "language_model.embedding.word_embeddings",  # persimmon
            "wte",  # gpt2
            "transformer.embd.wte",  # phi2
            "model.tok_embeddings",  # internlm2
            "model.embedding",  # mamba-qbert
            "backbone.embedding",  # mamba
            "backbone.embeddings",  # mamba-hf
            "transformer.in_out_embed",  # Grok
            "embedding.word_embeddings",  # chatglm
            "transformer.token_embeddings",  # openelm
            "shared",  # t5
            "rwkv.embeddings",  # rwkv6
            "model.embeddings",  # rwkv7
            "model.word_embeddings",  # bailingmoe
            "language_model.model.embed_tokens",  # llama4
            "encoder",  # neobert
            "model.transformer.wte",  # llada
            "embed_tokens",  # qwen3-embedding
        ),

        # Token type embeddings
        MODEL_TENSOR.TOKEN_TYPES: (
            "embeddings.token_type_embeddings",  # bert nomic-bert
        ),

        # Normalization of token embeddings
        MODEL_TENSOR.TOKEN_EMBD_NORM: (
            "word_embeddings_layernorm",  # bloom
            "embeddings.LayerNorm",  # bert
            "emb_ln",  # nomic-bert
            "transformer.norm",  # openelm
            "rwkv.blocks.0.pre_ln",  # rwkv
            "rwkv.blocks.0.pre_ln",  # rwkv6
            "model.pre_ln",  # rwkv7
            "model.layers.0.pre_norm",  # rwkv7
            "backbone.norm",  # wavtokenizer
            "model.embedding_norm",  # lfm2
        ),

        # Position embeddings
        MODEL_TENSOR.POS_EMBD: (
            "transformer.wpe",  # gpt2
            "embeddings.position_embeddings",  # bert
            "wpe",  # gpt2
        ),

        # Output
        MODEL_TENSOR.OUTPUT: (
            "embed_out",  # gptneox
            "lm_head",  # gpt2 mpt falcon llama-hf baichuan qwen mamba dbrx jais nemotron exaone olmoe olmo2 phimoe plamo2
            "output",  # llama-pth bloom internlm2
            "word_embeddings_for_head",  # persimmon
            "lm_head.linear",  # phi2
            "output_layer",  # chatglm
            "head",  # rwkv
            "head.out",  # wavtokenizer
            "lm_head",  # llama4
            "model.transformer.ff_out",  # llada
        ),

        # Output norm
        MODEL_TENSOR.OUTPUT_NORM: (
            "gpt_neox.final_layer_norm",  # gptneox
            "transformer.ln_f",  # gpt2 gpt-j falcon jais exaone
            "model.norm",  # llama-hf baichuan internlm2 olmoe olmo2 phimoe plamo2
            "norm",  # llama-pth
            "transformer.norm_f",  # mpt dbrx
            "ln_f",  # refact bloom qwen gpt2
            "language_model.encoder.final_layernorm",  # persimmon
            "model.final_layernorm",  # persimmon
            "lm_head.ln",  # phi2
            "model.norm_f",  # mamba-qbert
            "backbone.norm_f",  # mamba
            "transformer.rms_norm",  # Grok
            "encoder.final_layernorm",  # chatglm
            "transformer.norm",  # openelm
            "model.norm",  # nemotron
            "rwkv.ln_out",  # rwkv6
            "model.ln_out",  # rwkv7
            "backbone.final_layer_norm",  # wavtokenizer
            "model.norm",  # llama4
            "model.transformer.ln_f",  # llada
        ),

        # Rope frequencies
        MODEL_TENSOR.ROPE_FREQS: (
            "rope.freqs",  # llama-pth
            "rotary_pos_emb.inv_freq",  # chatglm
        ),

        MODEL_TENSOR.ROPE_FACTORS_LONG: (),
        MODEL_TENSOR.ROPE_FACTORS_SHORT: (),

        MODEL_TENSOR.CONV1D: (
            "backbone.embed",  # roberta
        ),
    }
    block_mappings_cfg: dict[MODEL_TENSOR, tuple[str, ...]] = {
        # Attention norm
        MODEL_TENSOR.ATTN_NORM: (
            "gpt_neox.layers.{bid}.input_layernorm",  # gptneox
            "transformer.h.{bid}.ln_1",  # gpt2 gpt-j refact qwen jais exaone
            "transformer.blocks.{bid}.norm_1",  # mpt
            "transformer.h.{bid}.input_layernorm",  # falcon7b
            "h.{bid}.input_layernorm",  # bloom
            "transformer.h.{bid}.ln_mlp",  # falcon40b
            "model.layers.{bid}.input_layernorm",  # llama-hf nemotron olmoe phimoe granite-hybrid
            "layers.{bid}.attention_norm",  # llama-pth
            "language_model.encoder.layers.{bid}.input_layernorm",  # persimmon
            "model.layers.{bid}.ln1",  # yi
            "h.{bid}.ln_1",  # gpt2
            "transformer.h.{bid}.ln",  # phi2
            "model.layers.layers.{bid}.norm",  # plamo
            "model.layers.layers.{bid}.pre_mixer_norm",  # plamo2
            "model.layers.{bid}.attention_norm",  # internlm2
            "model.layers.{bid}.norm",  # mamba-qbert
            "backbone.layers.{bid}.norm",  # mamba
            "transformer.decoder_layer.{bid}.rms_norm",  # Grok
            "model.layers.{bid}.pre_attn_norm",  # grok-2
            "transformer.blocks.{bid}.norm_attn_norm.norm_1",  # dbrx
            "encoder.layers.{bid}.input_layernorm",  # chatglm
            "transformer.layers.{bid}.attn_norm",  # openelm
            "rwkv.blocks.{bid}.ln1",  # rwkv6
            "model.layers.{bid}.ln1",  # rwkv7
            "model.layers.{bid}.input_layernorm",  # llama4
            "layers.{bid}.input_layernorm",  # embeddinggemma
            "transformer_encoder.{bid}.attention_norm",  # neobert
            "model.layers.{bid}.operator_norm",  # lfm2
            "model.transformer.blocks.{bid}.attn_norm",  # llada
            "layers.{bid}.input_layernorm",  # qwen3-embedding
        ),

        # Attention norm 2
        MODEL_TENSOR.ATTN_NORM_2: (
            "transformer.h.{bid}.ln_attn",  # falcon40b
            "encoder.layer.{bid}.layer_norm_1",  # jina-v2-code
            "rwkv.blocks.{bid}.ln2",  # rwkv6
            "model.layers.{bid}.ln2",  # rwkv7
        ),

        # Attention query-key-value
        MODEL_TENSOR.ATTN_QKV: (
            "gpt_neox.layers.{bid}.attention.query_key_value",  # gptneox
            "transformer.h.{bid}.attn.c_attn",  # gpt2 qwen jais
            "transformer.blocks.{bid}.attn.Wqkv",  # mpt
            "transformer.blocks.{bid}.norm_attn_norm.attn.Wqkv",  # dbrx
            "transformer.h.{bid}.self_attention.query_key_value",  # falcon
            "h.{bid}.self_attention.query_key_value",  # bloom
            "language_model.encoder.layers.{bid}.self_attention.query_key_value",  # persimmon
            "model.layers.{bid}.self_attn.query_key_value",  # persimmon
            "h.{bid}.attn.c_attn",  # gpt2
            "transformer.h.{bid}.mixer.Wqkv",  # phi2
            "encoder.layers.{bid}.attn.Wqkv",  # nomic-bert
            "encoder.layers.{bid}.mixer.Wqkv",  # jina
            "model.layers.{bid}.self_attn.qkv_proj",  # phi3
            "model.layers.layers.{bid}.mixer.qkv_proj",  # plamo2
            "encoder.layers.{bid}.self_attention.query_key_value",  # chatglm
            "transformer.layers.{bid}.attn.qkv_proj",  # openelm
            "transformer_encoder.{bid}.qkv",  # neobert
        ),

        # Attention query
        MODEL_TENSOR.ATTN_Q: (
            "model.layers.{bid}.self_attn.q_proj",  # llama-hf nemotron olmoe olmo2 phimoe
            "layers.{bid}.self_attn.q_proj",  # embeddinggemma
            "model.layers.{bid}.self_attn.q_proj_no_perm",  # llama-custom
            "layers.{bid}.attention.wq",  # llama-pth
            "encoder.layer.{bid}.attention.self.query",  # bert
            "transformer.layer.{bid}.attention.q_lin",  # distillbert
            "transformer.h.{bid}.attn.q_proj",  # gpt-j
            "model.layers.layers.{bid}.self_attn.q_proj",  # plamo
            "model.layers.{bid}.attention.wq",  # internlm2
            "transformer.decoder_layer.{bid}.multi_head_attention.query",  # Grok
            "transformer.h.{bid}.attn.attention.q_proj",  # exaone
            "model.layers.{bid}.self_attn.q_proj",  # llama4
            "model.transformer.blocks.{bid}.q_proj",  # llada
            "layers.{bid}.self_attn.q_proj",  # qwen3-embedding
            "backbone.layers.{bid}.mixer.q_proj",  # nemotron-h
        ),

        # Attention key
        MODEL_TENSOR.ATTN_K: (
            "model.layers.{bid}.self_attn.k_proj",  # llama-hf nemotron olmoe olmo2 phimoe
            "layers.{bid}.self_attn.k_proj",  # embeddinggemma
            "model.layers.{bid}.self_attn.k_proj_no_perm",  # llama-custom
            "layers.{bid}.attention.wk",  # llama-pth
            "encoder.layer.{bid}.attention.self.key",  # bert
            "transformer.layer.{bid}.attention.k_lin",  # distillbert
            "transformer.h.{bid}.attn.k_proj",  # gpt-j
            "transformer.h.{bid}.attn.k",  # refact
            "model.layers.layers.{bid}.self_attn.k_proj",  # plamo
            "model.layers.{bid}.attention.wk",  # internlm2
            "transformer.decoder_layer.{bid}.multi_head_attention.key",  # Grok
            "transformer.h.{bid}.attn.attention.k_proj",  # exaone
            "model.layers.{bid}.self_attn.k_proj",  # llama4
            "model.transformer.blocks.{bid}.k_proj",  # llada
            "layers.{bid}.self_attn.k_proj",  # qwen3-embedding
            "backbone.layers.{bid}.mixer.k_proj",  # nemotron-h
        ),

        # Attention value
        MODEL_TENSOR.ATTN_V: (
            "model.layers.{bid}.self_attn.v_proj",  # llama-hf nemotron olmoe olmo2 phimoe
            "layers.{bid}.self_attn.v_proj",  # embeddinggemma
            "layers.{bid}.attention.wv",  # llama-pth
            "encoder.layer.{bid}.attention.self.value",  # bert
            "transformer.layer.{bid}.attention.v_lin",  # distillbert
            "transformer.h.{bid}.attn.v_proj",  # gpt-j
            "transformer.h.{bid}.attn.v",  # refact
            "model.layers.layers.{bid}.self_attn.v_proj",  # plamo
            "model.layers.{bid}.attention.wv",  # internlm2
            "transformer.decoder_layer.{bid}.multi_head_attention.value",  # Grok
            "transformer.h.{bid}.attn.attention.v_proj",  # exaone
            "model.layers.{bid}.self_attn.v_proj",  # llama4
            "model.transformer.blocks.{bid}.v_proj",  # llada
            "layers.{bid}.self_attn.v_proj",  # qwen3-embedding
            "backbone.layers.{bid}.mixer.v_proj",  # nemotron-h
        ),

        # Attention output
        MODEL_TENSOR.ATTN_OUT: (
            "gpt_neox.layers.{bid}.attention.dense",  # gptneox
            "transformer.h.{bid}.attn.c_proj",  # gpt2 refact qwen jais
            "transformer.blocks.{bid}.attn.out_proj",  # mpt
            "transformer.h.{bid}.self_attention.dense",  # falcon
            "h.{bid}.self_attention.dense",  # bloom
            "model.layers.{bid}.self_attn.o_proj",  # llama-hf nemotron olmoe olmo2 phimoe
            "layers.{bid}.self_attn.o_proj",  # embeddinggemma
            "model.layers.{bid}.self_attn.out_proj",  # lfm2
            "model.layers.{bid}.self_attn.linear_attn",  # deci
            "layers.{bid}.attention.wo",  # llama-pth
            "encoder.layer.{bid}.attention.output.dense",  # bert
            "transformer.layer.{bid}.attention.out_lin",  # distillbert
            "transformer.h.{bid}.attn.out_proj",  # gpt-j
            "language_model.encoder.layers.{bid}.self_attention.dense",  # persimmon
            "model.layers.{bid}.self_attn.dense",  # persimmon
            "h.{bid}.attn.c_proj",  # gpt2
            "transformer.h.{bid}.mixer.out_proj",  # phi2
            "model.layers.layers.{bid}.self_attn.o_proj",  # plamo
            "model.layers.layers.{bid}.mixer.o_proj",  # plamo2
            "model.layers.{bid}.attention.wo",  # internlm2
            "encoder.layers.{bid}.attn.out_proj",  # nomic-bert
            "encoder.layers.{bid}.mixer.out_proj",  # jina
            "transformer.decoder_layer.{bid}.multi_head_attention.linear",  # Grok
            "transformer.blocks.{bid}.norm_attn_norm.attn.out_proj",  # dbrx
            "encoder.layers.{bid}.self_attention.dense",  # chatglm
            "transformer.layers.{bid}.attn.out_proj",  # openelm
            "transformer.h.{bid}.attn.attention.out_proj",  # exaone
            "model.layers.{bid}.self_attn.o_proj",  # llama4
            "transformer_encoder.{bid}.wo",  # neobert
            "model.transformer.blocks.{bid}.attn_out",  # llada
            "layers.{bid}.self_attn.o_proj",  # qwen3-embedding
            "backbone.layers.{bid}.mixer.o_proj",  # nemotron-h
        ),

        # Attention output norm
        MODEL_TENSOR.ATTN_OUT_NORM: (
            "encoder.layer.{bid}.attention.output.LayerNorm",  # bert
            "transformer.layer.{bid}.sa_layer_norm",  # distillbert
            "encoder.layers.{bid}.norm1",  # nomic-bert
            "transformer.decoder_layer.{bid}.rms_norm_1",  # Grok
            "model.layers.{bid}.post_attn_norm",  # grok-2
            "transformer.blocks.{bid}.norm_attn_norm.norm_2",  # dbrx
        ),

        MODEL_TENSOR.ATTN_POST_NORM: (
            "model.layers.{bid}.post_attention_layernorm",  # gemma2 olmo2
            "layers.{bid}.post_attention_layernorm",  # embeddinggemma
            "model.layers.{bid}.post_self_attn_layernorm",  # glm-4-0414
            "model.layers.layers.{bid}.post_mixer_norm.weight",  # plamo2
        ),

        # Rotary embeddings
        MODEL_TENSOR.ATTN_ROT_EMBD: (
            "model.layers.{bid}.self_attn.rotary_emb.inv_freq",  # llama-hf
            "layers.{bid}.attention.inner_attention.rope.freqs",  # llama-pth
            "model.layers.layers.{bid}.self_attn.rotary_emb.inv_freq",  # plamo
            "transformer.h.{bid}.attn.rotary_emb.inv_freq",  # codeshell
        ),

        MODEL_TENSOR.ATTN_SINKS: (
            "model.layers.{bid}.self_attn.sinks",  # openai-moe
        ),

        # Feed-forward norm
        MODEL_TENSOR.FFN_NORM: (
            "gpt_neox.layers.{bid}.post_attention_layernorm",  # gptneox
            "transformer.h.{bid}.ln_2",  # gpt2 refact qwen jais exaone
            "h.{bid}.post_attention_layernorm",  # bloom
            "transformer.blocks.{bid}.norm_2",  # mpt
            "model.layers.{bid}.post_attention_layernorm",  # llama-hf nemotron olmoe phimoe
            "layers.{bid}.ffn_norm",  # llama-pth
            "language_model.encoder.layers.{bid}.post_attention_layernorm",  # persimmon
            "model.layers.{bid}.ln2",  # yi
            "h.{bid}.ln_2",  # gpt2
            "model.layers.{bid}.ffn_norm",  # internlm2
            "transformer.decoder_layer.{bid}.rms_norm_2",  # Grok
            "model.layers.{bid}.pre_moe_norm",  # grok-2
            "encoder.layers.{bid}.post_attention_layernorm",  # chatglm
            "transformer.layers.{bid}.ffn_norm",  # openelm
            "model.layers.{bid}.pre_ff_layernorm",  # jamba granite-hybrid
            "model.layers.{bid}.pre_moe_layernorm",  # mini-jamba
            "model.layers.{bid}.post_attention_layernorm",  # llama4
            "transformer_encoder.{bid}.ffn_norm",  # neobert
            "model.layers.layers.{bid}.pre_mlp_norm",  # plamo2
            "model.transformer.blocks.{bid}.ff_norm",  # llada
            "layers.{bid}.post_attention_layernorm",  # qwen3-embedding
        ),

        # Pre feed-forward norm
        MODEL_TENSOR.FFN_PRE_NORM: (
            "model.layers.{bid}.pre_feedforward_layernorm",  # gemma2
            "layers.{bid}.pre_feedforward_layernorm",  # embeddinggemma
            "model.layers.{bid}.pre_ff_layernorm.weight",
        ),

        # Post feed-forward norm
        MODEL_TENSOR.FFN_POST_NORM: (
            "model.layers.{bid}.post_feedforward_layernorm",  # gemma2 olmo2
            "layers.{bid}.post_feedforward_layernorm",  # embeddinggemma
            "model.layers.{bid}.post_mlp_layernorm",  # glm-4-0414
            "model.layers.layers.{bid}.post_mlp_norm.weight",  # plamo2
            "model.layers.{bid}.feed_forward.up_proj",
            "model.layers.{bid}.post_moe_norm",  # grok-2
        ),

        MODEL_TENSOR.FFN_GATE_INP: (
            "layers.{bid}.feed_forward.gate",  # mixtral
            "model.layers.{bid}.block_sparse_moe.gate",  # mixtral phimoe
            "model.layers.{bid}.mlp.gate",  # qwen2moe olmoe
            "transformer.decoder_layer.{bid}.router",  # Grok
            "transformer.blocks.{bid}.ffn.router.layer",  # dbrx
            "model.layers.{bid}.block_sparse_moe.router.layer",  # granitemoe
            "model.layers.{bid}.feed_forward.router",  # llama4 jamba
            "encoder.layers.{bid}.mlp.router.layer",  # nomic-bert-moe
            "model.layers.{bid}.mlp.router",  # openai-moe
            "model.layers.{bid}.mlp.gate.wg",  # hunyuan
            "model.layers.{bid}.block_sparse_moe.primary_router",  # smallthinker
        ),

        MODEL_TENSOR.FFN_GATE_INP_SHEXP: (
            "model.layers.{bid}.mlp.shared_expert_gate",  # qwen2moe
        ),

        MODEL_TENSOR.FFN_EXP_PROBS_B: (
            "model.layers.{bid}.mlp.gate.e_score_correction",  # deepseek-v3 dots1
            "model.layers.{bid}.mlp.moe_statics.e_score_correction",  # ernie4.5-moe
        ),

        # Feed-forward up
        MODEL_TENSOR.FFN_UP: (
            "gpt_neox.layers.{bid}.mlp.dense_h_to_4h",  # gptneox
            "transformer.h.{bid}.mlp.c_fc",  # gpt2 jais
            "transformer.blocks.{bid}.ffn.up_proj",  # mpt
            "transformer.h.{bid}.mlp.dense_h_to_4h",  # falcon
            "h.{bid}.mlp.dense_h_to_4h",  # bloom
            "model.layers.{bid}.mlp.up_proj",  # llama-hf refact nemotron olmo2
            "layers.{bid}.mlp.up_proj",  # embeddinggemma
            "layers.{bid}.feed_forward.w3",  # llama-pth
            "encoder.layer.{bid}.intermediate.dense",  # bert
            "transformer.layer.{bid}.ffn.lin1",  # distillbert
            "transformer.h.{bid}.mlp.fc_in",  # gpt-j
            "transformer.h.{bid}.mlp.linear_3",  # refact
            "language_model.encoder.layers.{bid}.mlp.dense_h_to_4h",  # persimmon
            "model.layers.{bid}.mlp.dense_h_to_4h",  # persimmon
            "transformer.h.{bid}.mlp.w1",  # qwen
            "h.{bid}.mlp.c_fc",  # gpt2
            "transformer.h.{bid}.mlp.fc1",  # phi2
            "model.layers.{bid}.mlp.fc1",  # phi2
            "model.layers.{bid}.mlp.gate_up_proj",  # phi3 glm-4-0414
            "model.layers.layers.{bid}.mlp.up_proj",  # plamo
            "model.layers.layers.{bid}.mlp.gate_up_proj",  # plamo2
            "model.layers.{bid}.feed_forward.w3",  # internlm2
            "encoder.layers.{bid}.mlp.fc11",  # nomic-bert
            "encoder.layers.{bid}.mlp.fc1",  # nomic-bert-moe
            "model.layers.{bid}.mlp.c_fc",  # starcoder2
            "encoder.layer.{bid}.mlp.gated_layers_v",  # jina-bert-v2 (split up/gate, no longer used)
            "encoder.layer.{bid}.mlp.gated_layers",  # jina-bert-v2 (GEGLU)
            "encoder.layer.{bid}.mlp.up_gated_layer",  # jina-v2-code (GEGLU)
            "model.layers.{bid}.residual_mlp.w3",  # arctic
            "encoder.layers.{bid}.mlp.dense_h_to_4h",  # chatglm
            "transformer.h.{bid}.mlp.c_fc_1",  # exaone
            "model.layers.{bid}.feed_forward.up_proj",  # llama4 jamba granite-hybrid
            "transformer_encoder.{bid}.ffn.w12",  # neobert
            "model.layers.{bid}.block_sparse_moe.up",  # smallthinker
            "model.transformer.blocks.{bid}.up_proj",  # llada
            "layers.{bid}.mlp.up_proj",  # qwen3-embedding
            "backbone.layers.{bid}.mixer.up_proj",  # nemotron-h
        ),

        MODEL_TENSOR.FFN_UP_EXP: (
            "layers.{bid}.feed_forward.experts.w3",  # mixtral (merged)
            "transformer.decoder_layer.{bid}.moe.linear_v",  # Grok (merged)
            "transformer.blocks.{bid}.ffn.experts.mlp.v1",  # dbrx
            "model.layers.{bid}.mlp.experts.up_proj",  # qwen2moe olmoe (merged) ernie4.5-moe
            "model.layers.{bid}.block_sparse_moe.experts.w3",  # phimoe (merged)
            "model.layers.{bid}.feed_forward.experts.up_proj",  # llama4
            "encoder.layers.{bid}.mlp.experts.mlp.w1",  # nomic-bert-moe
            "model.layers.{bid}.block_sparse_moe.experts.up",  # smallthinker
        ),

        MODEL_TENSOR.FFN_UP_SHEXP: (
            "model.layers.{bid}.mlp.shared_expert.up_proj",  # qwen2moe
            "model.layers.{bid}.mlp.shared_experts.up_proj",  # deepseek deepseek2
            "model.layers.{bid}.feed_forward.shared_expert.up_proj",  # llama4
            "model.layers.{bid}.feed_forward.down_proj",
            "model.layers.{bid}.mlp.shared_mlp.up_proj",  # hunyuan
        ),

        MODEL_TENSOR.FFN_UP_CHEXP: (
            "model.layers.{bid}.mlp.chunk_experts.up_proj",  # grovemoe
        ),

        # AWQ-activation gate
        MODEL_TENSOR.FFN_ACT: (
            "transformer.blocks.{bid}.ffn.act",  # mpt
        ),

        # Feed-forward gate
        MODEL_TENSOR.FFN_GATE: (
            "model.layers.{bid}.mlp.gate_proj",  # llama-hf refact olmo2
            "layers.{bid}.mlp.gate_proj",  # embeddinggemma
            "layers.{bid}.feed_forward.w1",  # llama-pth
            "transformer.h.{bid}.mlp.w2",  # qwen
            "transformer.h.{bid}.mlp.c_fc2",  # jais
            "model.layers.layers.{bid}.mlp.gate_proj",  # plamo
            "model.layers.{bid}.feed_forward.w1",  # internlm2
            "encoder.layers.{bid}.mlp.fc12",  # nomic-bert
            "encoder.layer.{bid}.mlp.gated_layers_w",  # jina-bert-v2 (split up/gate, no longer used)
            "transformer.h.{bid}.mlp.linear_1",  # refact
            "model.layers.{bid}.residual_mlp.w1",  # arctic
            "transformer.h.{bid}.mlp.c_fc_0",  # exaone
            "model.layers.{bid}.feed_forward.gate_proj",  # llama4 jamba granite-hybrid
            "model.transformer.blocks.{bid}.ff_proj",  # llada
            "layers.{bid}.mlp.gate_proj",  # qwen3-embedding
        ),

        MODEL_TENSOR.FFN_GATE_EXP: (
            "layers.{bid}.feed_forward.experts.w1",  # mixtral (merged)
            "transformer.decoder_layer.{bid}.moe.linear",  # Grok (merged)
            "transformer.blocks.{bid}.ffn.experts.mlp.w1",  # dbrx
            "model.layers.{bid}.mlp.experts.gate_proj",  # qwen2moe olmoe (merged) ernie4.5-moe
            "model.layers.{bid}.block_sparse_moe.experts.w1",  # phimoe (merged)
            "model.layers.{bid}.feed_forward.experts.gate_proj",  # llama4
            "model.layers.{bid}.block_sparse_moe.experts.gate",  # smallthinker
        ),

        MODEL_TENSOR.FFN_GATE_SHEXP: (
            "model.layers.{bid}.mlp.shared_expert.gate_proj",  # qwen2moe
            "model.layers.{bid}.mlp.shared_experts.gate_proj",  # deepseek deepseek2
            "model.layers.{bid}.feed_forward.shared_expert.gate_proj",  # llama4
            "model.layers.{bid}.mlp.shared_mlp.gate_proj",  # hunyuan
        ),

        MODEL_TENSOR.FFN_GATE_CHEXP: (
            "model.layers.{bid}.mlp.chunk_experts.gate_proj",  # grovemoe
        ),

        # Feed-forward down
        MODEL_TENSOR.FFN_DOWN: (
            "gpt_neox.layers.{bid}.mlp.dense_4h_to_h",  # gptneox
            "transformer.h.{bid}.mlp.c_proj",  # gpt2 refact qwen jais
            "transformer.blocks.{bid}.ffn.down_proj",  # mpt
            "transformer.h.{bid}.mlp.dense_4h_to_h",  # falcon
            "h.{bid}.mlp.dense_4h_to_h",  # bloom
            "model.layers.{bid}.mlp.down_proj",  # llama-hf nemotron olmo2
            "layers.{bid}.mlp.down_proj",  # embeddinggemma
            "layers.{bid}.feed_forward.w2",  # llama-pth
            "encoder.layer.{bid}.output.dense",  # bert
            "transformer.layer.{bid}.ffn.lin2",  # distillbert
            "transformer.h.{bid}.mlp.fc_out",  # gpt-j
            "language_model.encoder.layers.{bid}.mlp.dense_4h_to_h",  # persimmon
            "model.layers.{bid}.mlp.dense_4h_to_h",  # persimmon
            "h.{bid}.mlp.c_proj",  # gpt2
            "transformer.h.{bid}.mlp.fc2",  # phi2
            "model.layers.{bid}.mlp.fc2",  # phi2
            "model.layers.layers.{bid}.mlp.down_proj",  # plamo
            "model.layers.{bid}.feed_forward.w2",  # internlm2
            "encoder.layers.{bid}.mlp.fc2",  # nomic-bert
            "model.layers.{bid}.mlp.c_proj",  # starcoder2
            "encoder.layer.{bid}.mlp.wo",  # jina-bert-v2
            "transformer.layers.{bid}.ffn.proj_2",  # openelm
            "model.layers.{bid}.residual_mlp.w2",  # arctic
            "encoder.layer.{bid}.mlp.down_layer",  # jina-bert-v2
            "encoder.layers.{bid}.mlp.dense_4h_to_h",  # chatglm
            "model.layers.h.{bid}.mlp.c_proj",  # exaone
            "model.layers.{bid}.feed_forward.down_proj",  # llama4 jamba granite-hybrid
            "transformer_encoder.{bid}.ffn.w3",  # neobert
            "model.layers.{bid}.block_sparse_moe.down",  # smallthinker
            "model.transformer.blocks.{bid}.ff_out",  # llada
            "layers.{bid}.mlp.down_proj",  # qwen3-embedding
            "backbone.layers.{bid}.mixer.down_proj",  # nemotron-h
        ),

        MODEL_TENSOR.FFN_DOWN_EXP: (
            "layers.{bid}.feed_forward.experts.w2",  # mixtral (merged)
            "transformer.decoder_layer.{bid}.moe.linear_1",  # Grok (merged)
            "transformer.blocks.{bid}.ffn.experts.mlp.w2",  # dbrx
            "model.layers.{bid}.mlp.experts.down_proj",  # qwen2moe olmoe (merged) ernie4.5-moe
            "model.layers.{bid}.block_sparse_moe.output_linear",  # granitemoe
            "model.layers.{bid}.block_sparse_moe.experts.w2",  # phimoe (merged)
            "model.layers.{bid}.feed_forward.experts.down_proj",  # llama4
            "encoder.layers.{bid}.mlp.experts.mlp.w2",  # nomic-bert-moe
            "model.layers.{bid}.block_sparse_moe.experts.down",  # smallthinker
        ),

        MODEL_TENSOR.FFN_DOWN_SHEXP: (
            "model.layers.{bid}.mlp.shared_expert.down_proj",  # qwen2moe
            "model.layers.{bid}.mlp.shared_experts.down_proj",  # deepseek deepseek2
            "model.layers.{bid}.feed_forward.shared_expert.down_proj",  # llama4
            "model.layers.{bid}.shared_mlp.output_linear",  # granitemoe
            "model.layers.{bid}.mlp.shared_mlp.down_proj",  # hunyuan
        ),

        MODEL_TENSOR.FFN_DOWN_CHEXP: (
            "model.layers.{bid}.mlp.chunk_experts.down_proj",  # grovemoe
        ),

        MODEL_TENSOR.ATTN_Q_NORM: (
            "language_model.encoder.layers.{bid}.self_attention.q_layernorm",
            "model.layers.{bid}.self_attn.q_layernorm",  # persimmon
            "model.layers.{bid}.self_attn.query_layernorm",  # hunyuan
            "model.layers.{bid}.self_attn.q_norm",  # cohere olmoe chameleon olmo2
            "layers.{bid}.self_attn.q_norm",  # embeddinggemma
            "transformer.blocks.{bid}.attn.q_ln",  # sea-lion
            "encoder.layer.{bid}.attention.self.layer_norm_q",  # jina-bert-v2
            "transformer.layers.{bid}.attn.q_norm",  # openelm
            "model.layers.layers.{bid}.mixer.q",  # plamo2
            "layers.{bid}.self_attn.q_norm",  # qwen3-embedding
        ),

        MODEL_TENSOR.ATTN_K_NORM: (
            "language_model.encoder.layers.{bid}.self_attention.k_layernorm",
            "model.layers.{bid}.self_attn.k_layernorm",  # persimmon
            "model.layers.{bid}.self_attn.key_layernorm",  # hunyuan
            "model.layers.{bid}.self_attn.k_norm",  # cohere olmoe chameleon olmo2
            "layers.{bid}.self_attn.k_norm",  # embeddinggemma
            "transformer.blocks.{bid}.attn.k_ln",  # sea-lion
            "encoder.layer.{bid}.attention.self.layer_norm_k",  # jina-bert-v2
            "transformer.layers.{bid}.attn.k_norm",  # openelm
            "model.layers.layers.{bid}.mixer.k",  # plamo2
            "layers.{bid}.self_attn.k_norm",  # qwen3-embedding
        ),

        MODEL_TENSOR.ROPE_FREQS: (
            "language_model.encoder.layers.{bid}.self_attention.rotary_emb.inv_freq",  # persimmon
        ),

        MODEL_TENSOR.LAYER_OUT_NORM: (
            "encoder.layer.{bid}.output.LayerNorm",  # bert
            "transformer.layer.{bid}.output_layer_norm",  # distillbert
            "encoder.layers.{bid}.norm2",  # nomic-bert
            "transformer.decoder_layer.{bid}.rms_norm_3",  # Grok
            "encoder.layer.{bid}.mlp.layernorm",  # jina-bert-v2
            "encoder.layer.{bid}.layer_norm_2",  # jina-v2-code
        ),

        MODEL_TENSOR.PER_LAYER_TOKEN_EMBD: (
            "model.embed_tokens_per_layer",  # gemma3n
        ),

        MODEL_TENSOR.PER_LAYER_MODEL_PROJ: (
            "model.per_layer_model_projection",  # gemma3n
        ),

        MODEL_TENSOR.PER_LAYER_PROJ_NORM: (
            "model.per_layer_projection_norm",  # gemma3n
        ),

        MODEL_TENSOR.ALTUP_PROJ: (
            "model.altup_projections",  # gemma3n
        ),

        MODEL_TENSOR.ALTUP_UNEMBD_PROJ: (
            "model.altup_unembed_projections",  # gemma3n
        ),

        MODEL_TENSOR.PER_LAYER_INP_GATE: (
            "model.layers.{bid}.per_layer_input_gate",  # gemma3n
        ),

        MODEL_TENSOR.PER_LAYER_PROJ: (
            "model.layers.{bid}.per_layer_projection",  # gemma3n
        ),

        MODEL_TENSOR.PER_LAYER_POST_NORM: (
            "model.layers.{bid}.post_per_layer_input_norm",  # gemma3n
        ),

        MODEL_TENSOR.ALTUP_CORRECT_COEF: (
            "model.layers.{bid}.altup.correction_coefs",  # gemma3n
        ),

        MODEL_TENSOR.ALTUP_CORRECT_SCALE: (
            "model.layers.{bid}.altup.correct_output_scale",  # gemma3n
        ),

        MODEL_TENSOR.ALTUP_PREDICT_COEF: (
            "model.layers.{bid}.altup.prediction_coefs",  # gemma3n
        ),

        MODEL_TENSOR.ALTUP_ROUTER: (
            "model.layers.{bid}.altup.modality_router",  # gemma3n
        ),

        MODEL_TENSOR.ALTUP_ROUTER_NORM: (
            "model.layers.{bid}.altup.router_norm",  # gemma3n
        ),

        MODEL_TENSOR.LAUREL_L: (
            "model.layers.{bid}.laurel.linear_left",  # gemma3n
        ),

        MODEL_TENSOR.LAUREL_R: (
            "model.layers.{bid}.laurel.linear_right",  # gemma3n
        ),

        MODEL_TENSOR.LAUREL_POST_NORM: (
            "model.layers.{bid}.laurel.post_laurel_norm",  # gemma3n
        ),

        MODEL_TENSOR.SSM_IN: (
            "model.layers.{bid}.in_proj",  # mamba-hf
            "backbone.layers.{bid}.mixer.in_proj",  # mamba
            "model.layers.{bid}.mamba.in_proj",  # jamba falcon-h1 granite-hybrid
            "model.layers.layers.{bid}.mixer.in_proj",  # plamo2
        ),

        MODEL_TENSOR.SSM_CONV1D: (
            "model.layers.{bid}.conv1d",  # mamba-hf
            "backbone.layers.{bid}.mixer.conv1d",  # mamba
            "model.layers.{bid}.mamba.conv1d",  # jamba falcon-h1 granite-hybrid
            "model.layers.layers.{bid}.mixer.conv1d",  # plamo2
        ),

        MODEL_TENSOR.SSM_X: (
            "model.layers.{bid}.x_proj",  # mamba-hf
            "backbone.layers.{bid}.mixer.x_proj",  # mamba
            "model.layers.{bid}.mamba.x_proj",  # jamba
            "model.layers.layers.{bid}.mixer.bcdt_proj",  # plamo2
        ),

        MODEL_TENSOR.SSM_DT: (
            "model.layers.{bid}.dt_proj",  # mamba-hf
            "backbone.layers.{bid}.mixer.dt_proj",  # mamba
            "model.layers.{bid}.mamba.dt_proj",  # jamba falcon-h1 granite-hybrid
            "model.layers.layers.{bid}.mixer.dt_proj",  # plamo2
        ),

        MODEL_TENSOR.SSM_DT_NORM: (
            "model.layers.layers.{bid}.mixer.dt_norm.weight",  # plamo2
            "model.layers.{bid}.mamba.dt_layernorm",  # jamba
        ),

        MODEL_TENSOR.SSM_A: (
            "model.layers.{bid}.A_log",  # mamba-hf
            "backbone.layers.{bid}.mixer.A_log",  # mamba
            "model.layers.{bid}.mamba.A_log",  # jamba falcon-h1 granite-hybrid
            "model.layers.layers.{bid}.mixer.A_log",  # plamo2
        ),

        MODEL_TENSOR.SSM_B_NORM: (
            "model.layers.{bid}.mamba.b_layernorm",  # jamba
            "model.layers.{bid}.mamba.B_layernorm",  # mini-jamba
            "model.layers.layers.{bid}.mixer.B_norm.weight",  # plamo2
        ),

        MODEL_TENSOR.SSM_C_NORM: (
            "model.layers.{bid}.mamba.c_layernorm",  # jamba
            "model.layers.{bid}.mamba.C_layernorm",  # mini-jamba
            "model.layers.layers.{bid}.mixer.C_norm.weight",  # plamo2
        ),

        MODEL_TENSOR.SSM_D: (
            "model.layers.{bid}.D",  # mamba-hf
            "backbone.layers.{bid}.mixer.D",  # mamba
            "model.layers.{bid}.mamba.D",  # jamba falcon-h1 granite-hybrid
            "model.layers.layers.{bid}.mixer.D",  # plamo2
        ),

        MODEL_TENSOR.SSM_NORM: (
            "model.layers.{bid}.mamba.norm",  # falcon-h1 granite-hybrid
            "backbone.layers.{bid}.mixer.norm",  # mamba2
        ),

        MODEL_TENSOR.SSM_OUT: (
            "model.layers.{bid}.out_proj",  # mamba-hf
            "backbone.layers.{bid}.mixer.out_proj",  # mamba
            "model.layers.{bid}.mamba.out_proj",  # jamba falcon-h1 granite-hybrid
            "model.layers.layers.{bid}.mixer.out_proj",  # plamo2
        ),

        MODEL_TENSOR.TIME_MIX_W0: (
            "model.layers.{bid}.attention.w0",  # rwkv7
        ),

        MODEL_TENSOR.TIME_MIX_W1: (
            "rwkv.blocks.{bid}.attention.time_maa_w1",  # rwkv6
            "model.layers.{bid}.self_attn.time_maa_w1",  # rwkv6qwen2
            "model.layers.{bid}.attention.w1",  # rwkv7
        ),

        MODEL_TENSOR.TIME_MIX_W2: (
            "rwkv.blocks.{bid}.attention.time_maa_w2",  # rwkv6
            "model.layers.{bid}.self_attn.time_maa_w2",  # rwkv6qwen2
            "model.layers.{bid}.attention.w2",  # rwkv7
        ),

        MODEL_TENSOR.TIME_MIX_A0: (
            "model.layers.{bid}.attention.a0",  # rwkv7
        ),

        MODEL_TENSOR.TIME_MIX_A1: (
            "model.layers.{bid}.attention.a1",  # rwkv7
        ),

        MODEL_TENSOR.TIME_MIX_A2: (
            "model.layers.{bid}.attention.a2",  # rwkv7
        ),

        MODEL_TENSOR.TIME_MIX_V0: (
            "model.layers.{bid}.attention.v0",  # rwkv7
        ),

        MODEL_TENSOR.TIME_MIX_V1: (
            "model.layers.{bid}.attention.v1",  # rwkv7
        ),

        MODEL_TENSOR.TIME_MIX_V2: (
            "model.layers.{bid}.attention.v2",  # rwkv7
        ),

        MODEL_TENSOR.TIME_MIX_G1: (
            "model.layers.{bid}.attention.g1",  # rwkv7
        ),

        MODEL_TENSOR.TIME_MIX_G2: (
            "model.layers.{bid}.attention.g2",  # rwkv7
        ),

        MODEL_TENSOR.TIME_MIX_K_K: (
            "model.layers.{bid}.attention.k_k",  # rwkv7
        ),

        MODEL_TENSOR.TIME_MIX_K_A: (
            "model.layers.{bid}.attention.k_a",  # rwkv7
        ),

        MODEL_TENSOR.TIME_MIX_R_K: (
            "model.layers.{bid}.attention.r_k",  # rwkv7
        ),

        MODEL_TENSOR.TIME_MIX_LERP_X: (
            "rwkv.blocks.{bid}.attention.time_maa_x",  # rwkv6
            "model.layers.{bid}.self_attn.time_maa_x",  # rwkv6qwen2
        ),

        MODEL_TENSOR.TIME_MIX_LERP_K: (
            "rwkv.blocks.{bid}.attention.time_maa_k",  # rwkv6
            "model.layers.{bid}.self_attn.time_maa_k",  # rwkv6qwen2
        ),

        MODEL_TENSOR.TIME_MIX_LERP_V: (
            "rwkv.blocks.{bid}.attention.time_maa_v",  # rwkv6
            "model.layers.{bid}.self_attn.time_maa_v",  # rwkv6qwen2
        ),

        MODEL_TENSOR.TIME_MIX_LERP_R: (
            "rwkv.blocks.{bid}.attention.time_maa_r",  # rwkv6
            "model.layers.{bid}.self_attn.time_maa_r",  # rwkv6qwen2
        ),

        MODEL_TENSOR.TIME_MIX_LERP_G: (
            "rwkv.blocks.{bid}.attention.time_maa_g",  # rwkv6
            "model.layers.{bid}.self_attn.time_maa_g",  # rwkv6qwen2
        ),

        MODEL_TENSOR.TIME_MIX_LERP_W: (
            "rwkv.blocks.{bid}.attention.time_maa_w",  # rwkv6
            "model.layers.{bid}.self_attn.time_maa_w",  # rwkv6qwen2
        ),

        MODEL_TENSOR.TIME_MIX_FIRST: (
            "rwkv.blocks.{bid}.attention.time_faaaa",  # rwkv6
        ),

        MODEL_TENSOR.TIME_MIX_DECAY: (
            "rwkv.blocks.{bid}.attention.time_decay",  # rwkv6
            "model.layers.{bid}.self_attn.time_decay",  # rwkv6qwen2
        ),

        MODEL_TENSOR.TIME_MIX_DECAY_W1: (
            "rwkv.blocks.{bid}.attention.time_decay_w1",  # rwkv6
            "model.layers.{bid}.self_attn.time_decay_w1",  # rwkv6qwen2
        ),

        MODEL_TENSOR.TIME_MIX_DECAY_W2: (
            "rwkv.blocks.{bid}.attention.time_decay_w2",  # rwkv6
            "model.layers.{bid}.self_attn.time_decay_w2",  # rwkv6qwen2
        ),

        MODEL_TENSOR.TIME_MIX_KEY: (
            "rwkv.blocks.{bid}.attention.key",  # rwkv6
            "model.layers.{bid}.self_attn.k_proj",  # rwkv6qwen2
            "model.layers.{bid}.attention.key",  # rwkv7
            "model.layers.{bid}.attention.k_proj",  # rwkv7
        ),

        MODEL_TENSOR.TIME_MIX_VALUE: (
            "rwkv.blocks.{bid}.attention.value",  # rwkv6
            "model.layers.{bid}.self_attn.v_proj",  # rwkv6qwen2
            "model.layers.{bid}.attention.value",  # rwkv7
            "model.layers.{bid}.attention.v_proj",  # rwkv7
        ),

        MODEL_TENSOR.TIME_MIX_RECEPTANCE: (
            "rwkv.blocks.{bid}.attention.receptance",  # rwkv6
            "model.layers.{bid}.self_attn.q_proj",  # rwkv6qwen2
            "model.layers.{bid}.attention.receptance",  # rwkv7
            "model.layers.{bid}.attention.r_proj",  # rwkv7
        ),

        MODEL_TENSOR.TIME_MIX_GATE: (
            "rwkv.blocks.{bid}.attention.gate",  # rwkv6
            "model.layers.{bid}.self_attn.gate",  # rwkv6qwen2
        ),

        MODEL_TENSOR.TIME_MIX_LN: (
            "rwkv.blocks.{bid}.attention.ln_x",  # rwkv6
            "model.layers.{bid}.attention.ln_x",  # rwkv7
        ),

        MODEL_TENSOR.TIME_MIX_OUTPUT: (
            "rwkv.blocks.{bid}.attention.output",  # rwkv6
            "model.layers.{bid}.self_attn.o_proj",  # rwkv6qwen2
            "model.layers.{bid}.attention.output",  # rwkv7
            "model.layers.{bid}.attention.o_proj",  # rwkv7
        ),

        MODEL_TENSOR.CHANNEL_MIX_LERP_K: (
            "rwkv.blocks.{bid}.feed_forward.time_maa_k",  # rwkv6
            "model.layers.{bid}.feed_forward.x_k",  # rwkv7
        ),

        MODEL_TENSOR.CHANNEL_MIX_LERP_R: (
            "rwkv.blocks.{bid}.feed_forward.time_maa_r",  # rwkv6
        ),

        MODEL_TENSOR.CHANNEL_MIX_KEY: (
            "rwkv.blocks.{bid}.feed_forward.key",  # rwkv6
            "model.layers.{bid}.feed_forward.key",  # rwkv7
        ),

        MODEL_TENSOR.CHANNEL_MIX_RECEPTANCE: (
            "rwkv.blocks.{bid}.feed_forward.receptance",  # rwkv6
        ),

        MODEL_TENSOR.CHANNEL_MIX_VALUE: (
            "rwkv.blocks.{bid}.feed_forward.value",  # rwkv6
            "model.layers.{bid}.feed_forward.value",  # rwkv7
        ),

        MODEL_TENSOR.ATTN_Q_A: (
            "model.layers.{bid}.self_attn.q_a_proj",  # deepseek2
        ),

        MODEL_TENSOR.ATTN_Q_B: (
            "model.layers.{bid}.self_attn.q_b_proj",  # deepseek2
        ),

        MODEL_TENSOR.ATTN_KV_A_MQA: (
            "model.layers.{bid}.self_attn.kv_a_proj_with_mqa",  # deepseek2
        ),

        MODEL_TENSOR.ATTN_KV_B: (
            "model.layers.{bid}.self_attn.kv_b_proj",  # deepseek2
        ),

        MODEL_TENSOR.ATTN_K_B: (
            "model.layers.{bid}.self_attn.k_b_proj",  # deepseek2
        ),

        MODEL_TENSOR.ATTN_V_B: (
            "model.layers.{bid}.self_attn.v_b_proj",  # deepseek2
        ),

        MODEL_TENSOR.ATTN_Q_A_NORM: (
            "model.layers.{bid}.self_attn.q_a_layernorm",  # deepseek2
        ),

        MODEL_TENSOR.ATTN_KV_A_NORM: (
            "model.layers.{bid}.self_attn.kv_a_layernorm",  # deepseek2
        ),

        MODEL_TENSOR.ATTN_SUB_NORM: (
            "model.layers.{bid}.self_attn.inner_attn_ln",  # bitnet
        ),

        MODEL_TENSOR.FFN_SUB_NORM: (
            "model.layers.{bid}.mlp.ffn_layernorm",  # bitnet
        ),

        MODEL_TENSOR.DEC_ATTN_NORM: (
            "decoder.block.{bid}.layer.0.layer_norm",  # t5
        ),

        MODEL_TENSOR.DEC_ATTN_Q: (
            "decoder.block.{bid}.layer.0.SelfAttention.q",  # t5
        ),

        MODEL_TENSOR.DEC_ATTN_K: (
            "decoder.block.{bid}.layer.0.SelfAttention.k",  # t5
        ),

        MODEL_TENSOR.DEC_ATTN_V: (
            "decoder.block.{bid}.layer.0.SelfAttention.v",  # t5
        ),

        MODEL_TENSOR.DEC_ATTN_OUT: (
            "decoder.block.{bid}.layer.0.SelfAttention.o",  # t5
        ),

        MODEL_TENSOR.DEC_ATTN_REL_B: (
            "decoder.block.{bid}.layer.0.SelfAttention.relative_attention_bias",  # t5
        ),

        MODEL_TENSOR.DEC_CROSS_ATTN_NORM: (
            "decoder.block.{bid}.layer.1.layer_norm",  # t5
        ),

        MODEL_TENSOR.DEC_CROSS_ATTN_Q: (
            "decoder.block.{bid}.layer.1.EncDecAttention.q",  # t5
        ),

        MODEL_TENSOR.DEC_CROSS_ATTN_K: (
            "decoder.block.{bid}.layer.1.EncDecAttention.k",  # t5
        ),

        MODEL_TENSOR.DEC_CROSS_ATTN_V: (
            "decoder.block.{bid}.layer.1.EncDecAttention.v",  # t5
        ),

        MODEL_TENSOR.DEC_CROSS_ATTN_OUT: (
            "decoder.block.{bid}.layer.1.EncDecAttention.o",  # t5
        ),

        MODEL_TENSOR.DEC_CROSS_ATTN_REL_B: (
            "decoder.block.{bid}.layer.1.EncDecAttention.relative_attention_bias",  # t5
        ),

        MODEL_TENSOR.DEC_FFN_NORM: (
            "decoder.block.{bid}.layer.2.layer_norm",  # t5
        ),

        MODEL_TENSOR.DEC_FFN_GATE: (
            "decoder.block.{bid}.layer.2.DenseReluDense.wi_0",  # flan-t5
        ),

        MODEL_TENSOR.DEC_FFN_UP: (
            "decoder.block.{bid}.layer.2.DenseReluDense.wi",  # t5
            "decoder.block.{bid}.layer.2.DenseReluDense.wi_1",  # flan-t5
        ),

        MODEL_TENSOR.DEC_FFN_DOWN: (
            "decoder.block.{bid}.layer.2.DenseReluDense.wo",  # t5
        ),

        MODEL_TENSOR.DEC_OUTPUT_NORM: (
            "decoder.final_layer_norm",  # t5
        ),

        MODEL_TENSOR.ENC_ATTN_NORM: (
            "encoder.block.{bid}.layer.0.layer_norm",  # t5
        ),

        MODEL_TENSOR.ENC_ATTN_Q: (
            "encoder.block.{bid}.layer.0.SelfAttention.q",  # t5
        ),

        MODEL_TENSOR.ENC_ATTN_K: (
            "encoder.block.{bid}.layer.0.SelfAttention.k",  # t5
        ),

        MODEL_TENSOR.ENC_ATTN_V: (
            "encoder.block.{bid}.layer.0.SelfAttention.v",  # t5
        ),

        MODEL_TENSOR.ENC_ATTN_OUT: (
            "encoder.block.{bid}.layer.0.SelfAttention.o",  # t5
        ),

        MODEL_TENSOR.ENC_ATTN_REL_B: (
            "encoder.block.{bid}.layer.0.SelfAttention.relative_attention_bias",  # t5
        ),

        MODEL_TENSOR.ENC_FFN_NORM: (
            "encoder.block.{bid}.layer.1.layer_norm",  # t5
        ),

        MODEL_TENSOR.ENC_FFN_GATE: (
            "encoder.block.{bid}.layer.1.DenseReluDense.wi_0",  # flan-t5
        ),

        MODEL_TENSOR.ENC_FFN_UP: (
            "encoder.block.{bid}.layer.1.DenseReluDense.wi",  # t5
            "encoder.block.{bid}.layer.1.DenseReluDense.wi_1",  # flan-t5
        ),

        MODEL_TENSOR.ENC_FFN_DOWN: (
            "encoder.block.{bid}.layer.1.DenseReluDense.wo",  # t5
        ),

        ############################################################################
        # TODO: these do not belong to block_mappings_cfg - move them to mappings_cfg
        MODEL_TENSOR.ENC_OUTPUT_NORM: (
            "encoder.final_layer_norm",  # t5
            "layer_norm",  # neobert
        ),

        MODEL_TENSOR.CLS: (
            "classifier",  # jina
            "classifier.dense",  # roberta
            "pre_classifier",  # distillbert
            "dense",  # neobert
        ),

        MODEL_TENSOR.CLS_OUT: (
            "classifier.out_proj",  # roberta
        ),
        #############################################################################

        MODEL_TENSOR.CONVNEXT_DW: (
            "backbone.convnext.{bid}.dwconv",  # wavtokenizer
        ),

        MODEL_TENSOR.CONVNEXT_NORM: (
            "backbone.convnext.{bid}.norm",  # wavtokenizer
        ),

        MODEL_TENSOR.CONVNEXT_PW1: (
            "backbone.convnext.{bid}.pwconv1",  # wavtokenizer
        ),

        MODEL_TENSOR.CONVNEXT_PW2: (
            "backbone.convnext.{bid}.pwconv2",  # wavtokenizer
        ),

        MODEL_TENSOR.CONVNEXT_GAMMA: (
            "backbone.convnext.{bid}.gamma",  # wavtokenizer
        ),

        MODEL_TENSOR.POSNET_CONV1: (
            "backbone.posnet.{bid}.conv1",  # wavtokenizer
        ),

        MODEL_TENSOR.POSNET_CONV2: (
            "backbone.posnet.{bid}.conv2",  # wavtokenizer
        ),

        MODEL_TENSOR.POSNET_NORM: (
            "backbone.posnet.{bid}.norm",  # wavtokenizer
        ),

        MODEL_TENSOR.POSNET_NORM1: (
            "backbone.posnet.{bid}.norm1",  # wavtokenizer
        ),

        MODEL_TENSOR.POSNET_NORM2: (
            "backbone.posnet.{bid}.norm2",  # wavtokenizer
        ),

        MODEL_TENSOR.POSNET_ATTN_NORM: (
            "backbone.posnet.{bid}.norm",  # wavtokenizer
        ),

        MODEL_TENSOR.POSNET_ATTN_Q: (
            "backbone.posnet.{bid}.q",  # wavtokenizer
        ),

        MODEL_TENSOR.POSNET_ATTN_K: (
            "backbone.posnet.{bid}.k",  # wavtokenizer
        ),

        MODEL_TENSOR.POSNET_ATTN_V: (
            "backbone.posnet.{bid}.v",  # wavtokenizer
        ),

        MODEL_TENSOR.POSNET_ATTN_OUT: (
            "backbone.posnet.{bid}.proj_out",  # wavtokenizer
        ),

        MODEL_TENSOR.SHORTCONV_CONV: (
            "model.layers.{bid}.conv.conv",
        ),

        MODEL_TENSOR.SHORTCONV_INPROJ: (
            "model.layers.{bid}.conv.in_proj",
        ),

        MODEL_TENSOR.SHORTCONV_OUTPROJ: (
            "model.layers.{bid}.conv.out_proj",
        ),

        #############################################################################
        ## Vision encoder

        MODEL_TENSOR.V_MMPROJ: (
            "multi_modal_projector.linear_{bid}",
            "visual.merger.mlp.{bid}",  # qwen2vl
        ),

        MODEL_TENSOR.V_MMPROJ_FC: (
            "model.connector.modality_projection.proj",  # SmolVLM
        ),

        MODEL_TENSOR.V_MMPROJ_MLP: (
            "model.mm_projector.mlp.mlp.{bid}",
            "vision_model.vision_adapter.mlp.fc{bid}",  # llama 4
            "mlp1.{bid}",  # InternVL
        ),

        MODEL_TENSOR.V_MMPROJ_PEG: (
            "model.mm_projector.peg.peg.{bid}",
        ),

        MODEL_TENSOR.V_ENC_EMBD_CLS: (
            "vision_tower.vision_model.embeddings.class_embedding",
            "model.vision_tower.embeddings.cls_token",  # Intern-S1
            "vision_model.class_embedding",  # llama 4
        ),

        MODEL_TENSOR.V_ENC_EMBD_PATCH: (
            "vision_tower.vision_model.embeddings.patch_embedding",
            "model.vision_tower.embeddings.patch_embeddings.projection",  # Intern-S1
            "vpm.embeddings.patch_embedding",
            "model.vision_model.embeddings.patch_embedding",  # SmolVLM
            "vision_tower.patch_conv",  # pixtral-hf
            "vision_encoder.patch_conv",  # pixtral
            "vision_model.patch_embedding.linear",  # llama 4
            "visual.patch_embed.proj",  # qwen2vl
            "vision_tower.patch_embed.proj",  # kimi-vl
        ),

        MODEL_TENSOR.V_ENC_EMBD_POS: (
            "vision_tower.vision_model.embeddings.position_embedding",
            "model.vision_tower.embeddings.position_embeddings",  # Intern-S1
            "vpm.embeddings.position_embedding",
            "model.vision_model.embeddings.position_embedding",  # SmolVLM
            "vision_model.positional_embedding_vlm",  # llama 4
            "vision_tower.patch_embed.pos_emb",  # kimi-vl
        ),

        MODEL_TENSOR.V_ENC_ATTN_Q: (
            "vision_tower.vision_model.encoder.layers.{bid}.self_attn.q_proj",
            "model.vision_tower.encoder.layer.{bid}.attention.q_proj",  # Intern-S1
            "vpm.encoder.layers.{bid}.self_attn.q_proj",
            "model.vision_model.encoder.layers.{bid}.self_attn.q_proj",  # SmolVLM
            "vision_model.model.layers.{bid}.self_attn.q_proj",  # llama4
            "vision_tower.transformer.layers.{bid}.attention.q_proj",  # pixtral-hf
            "vision_encoder.transformer.layers.{bid}.attention.wq",  # pixtral
            "visual.blocks.{bid}.attn.q",  # qwen2vl, generated
            "vision_tower.encoder.blocks.{bid}.wq",  # kimi-vl, generated
        ),

        MODEL_TENSOR.V_ENC_ATTN_Q_NORM: (
            "vision_tower.vision_model.encoder.layers.{bid}.attn.q_norm",  # InternVL
            "model.vision_tower.encoder.layer.{bid}.attention.q_norm",  # Intern-S1
        ),

        MODEL_TENSOR.V_ENC_ATTN_K: (
            "vision_tower.vision_model.encoder.layers.{bid}.self_attn.k_proj",
            "model.vision_tower.encoder.layer.{bid}.attention.k_proj",  # Intern-S1
            "vpm.encoder.layers.{bid}.self_attn.k_proj",
            "model.vision_model.encoder.layers.{bid}.self_attn.k_proj",  # SmolVLM
            "vision_model.model.layers.{bid}.self_attn.k_proj",  # llama4
            "vision_tower.transformer.layers.{bid}.attention.k_proj",  # pixtral-hf
            "vision_encoder.transformer.layers.{bid}.attention.wk",  # pixtral
            "visual.blocks.{bid}.attn.k",  # qwen2vl, generated
            "vision_tower.encoder.blocks.{bid}.wk",  # kimi-vl, generated
        ),

        MODEL_TENSOR.V_ENC_ATTN_K_NORM: (
            "vision_tower.vision_model.encoder.layers.{bid}.attn.k_norm",  # InternVL
            "model.vision_tower.encoder.layer.{bid}.attention.k_norm",  # Intern-S1
        ),

        MODEL_TENSOR.V_ENC_ATTN_V: (
            "vision_tower.vision_model.encoder.layers.{bid}.self_attn.v_proj",
            "model.vision_tower.encoder.layer.{bid}.attention.v_proj",  # Intern-S1
            "vpm.encoder.layers.{bid}.self_attn.v_proj",
            "model.vision_model.encoder.layers.{bid}.self_attn.v_proj",  # SmolVLM
            "vision_model.model.layers.{bid}.self_attn.v_proj",  # llama4
            "vision_tower.transformer.layers.{bid}.attention.v_proj",  # pixtral-hf
            "vision_encoder.transformer.layers.{bid}.attention.wv",  # pixtral
            "visual.blocks.{bid}.attn.v",  # qwen2vl, generated
            "vision_tower.encoder.blocks.{bid}.wv",  # kimi-vl, generated
        ),

        MODEL_TENSOR.V_ENC_INPUT_NORM: (
            "vision_tower.vision_model.encoder.layers.{bid}.layer_norm1",
            "vision_tower.vision_model.encoder.layers.{bid}.norm1",  # InternVL
            "model.vision_tower.encoder.layer.{bid}.layernorm_before",  # Intern-S1
            "vpm.encoder.layers.{bid}.layer_norm1",
            "model.vision_model.encoder.layers.{bid}.layer_norm1",  # SmolVLM
            "vision_tower.transformer.layers.{bid}.attention_norm",  # pixtral-hf
            "vision_encoder.transformer.layers.{bid}.attention_norm",  # pixtral
            "vision_model.model.layers.{bid}.input_layernorm",  # llama4
            "visual.blocks.{bid}.norm1",  # qwen2vl
            "vision_tower.encoder.blocks.{bid}.norm0",  # kimi-vl (norm0/norm1)
        ),

        MODEL_TENSOR.V_ENC_ATTN_O: (
            "vision_tower.vision_model.encoder.layers.{bid}.self_attn.out_proj",
            "vision_tower.vision_model.encoder.layers.{bid}.attn.proj",  # InternVL
            "model.vision_tower.encoder.layer.{bid}.attention.projection_layer",  # Intern-S1
            "vpm.encoder.layers.{bid}.self_attn.out_proj",
            "model.vision_model.encoder.layers.{bid}.self_attn.out_proj",  # SmolVLM
            "vision_model.model.layers.{bid}.self_attn.o_proj",  # llama4
            "vision_tower.transformer.layers.{bid}.attention.o_proj",  # pixtral-hf
            "vision_encoder.transformer.layers.{bid}.attention.wo",  # pixtral
            "visual.blocks.{bid}.attn.proj",  # qwen2vl
            "vision_tower.encoder.blocks.{bid}.wo",  # kimi-vl
        ),

        MODEL_TENSOR.V_ENC_POST_ATTN_NORM: (
            "vision_tower.vision_model.encoder.layers.{bid}.layer_norm2",
            "vision_tower.vision_model.encoder.layers.{bid}.norm2",  # InternVL
            "model.vision_tower.encoder.layer.{bid}.layernorm_after",  # Intern-S1
            "vpm.encoder.layers.{bid}.layer_norm2",
            "model.vision_model.encoder.layers.{bid}.layer_norm2",  # SmolVLM
            "vision_model.model.layers.{bid}.post_attention_layernorm",  # llama4
            "vision_tower.transformer.layers.{bid}.ffn_norm",  # pixtral-hf
            "vision_encoder.transformer.layers.{bid}.ffn_norm",  # pixtral
            "visual.blocks.{bid}.norm2",  # qwen2vl
            "vision_tower.encoder.blocks.{bid}.norm1",  # kimi-vl (norm0/norm1)
        ),

        MODEL_TENSOR.V_ENC_FFN_UP: (
            "vision_tower.vision_model.encoder.layers.{bid}.mlp.fc1",
            "model.vision_tower.encoder.layer.{bid}.mlp.fc1",  # Intern-S1
            "vpm.encoder.layers.{bid}.mlp.fc1",
            "model.vision_model.encoder.layers.{bid}.mlp.fc1",  # SmolVLM, gemma3
            "vision_tower.transformer.layers.{bid}.feed_forward.up_proj",  # pixtral-hf
            "vision_encoder.transformer.layers.{bid}.feed_forward.w3",  # pixtral
            "vision_model.model.layers.{bid}.mlp.fc1",  # llama4
            "visual.blocks.{bid}.mlp.fc1",  # qwen2vl
            "visual.blocks.{bid}.mlp.up_proj",  # qwen2.5vl
            "vision_tower.encoder.blocks.{bid}.mlp.fc0",  # kimi-vl (fc0/fc1)
        ),

        MODEL_TENSOR.V_ENC_FFN_GATE: (
            "vision_tower.transformer.layers.{bid}.feed_forward.gate_proj",  # pixtral-hf
            "vision_encoder.transformer.layers.{bid}.feed_forward.w1",  # pixtral
            "visual.blocks.{bid}.mlp.gate_proj",  # qwen2.5vl
        ),

        MODEL_TENSOR.V_ENC_FFN_DOWN: (
            "vision_tower.vision_model.encoder.layers.{bid}.mlp.fc2",
            "model.vision_tower.encoder.layer.{bid}.mlp.fc2",  # Intern-S1
            "vpm.encoder.layers.{bid}.mlp.fc2",
            "model.vision_model.encoder.layers.{bid}.mlp.fc2",  # SmolVLM, gemma3
            "vision_tower.transformer.layers.{bid}.feed_forward.down_proj",  # pixtral-hf
            "vision_encoder.transformer.layers.{bid}.feed_forward.w2",  # pixtral
            "vision_model.model.layers.{bid}.mlp.fc2",  # llama4
            "visual.blocks.{bid}.mlp.fc2",  # qwen2vl
            "visual.blocks.{bid}.mlp.down_proj",  # qwen2.5vl
            "vision_tower.encoder.blocks.{bid}.mlp.fc1",  # kimi-vl (fc0/fc1)
        ),

        MODEL_TENSOR.V_LAYER_SCALE_1: (
            "vision_tower.vision_model.encoder.layers.{bid}.ls1",  # InternVL
            "model.vision_tower.encoder.layer.{bid}.lambda_1",  # Intern-S1
        ),

        MODEL_TENSOR.V_LAYER_SCALE_2: (
            "vision_tower.vision_model.encoder.layers.{bid}.ls2",  # InternVL
            "model.vision_tower.encoder.layer.{bid}.lambda_2",  # Intern-S1
        ),

        MODEL_TENSOR.V_PRE_NORM: (
            "vision_tower.vision_model.pre_layrnorm",
            "vision_tower.ln_pre",  # pixtral-hf
            "vision_encoder.ln_pre",  # pixtral
            "vision_model.layernorm_pre",  # llama4
        ),

        MODEL_TENSOR.V_POST_NORM: (
            "vision_tower.vision_model.post_layernorm",
            "model.vision_model.post_layernorm",  # SmolVLM
            "vision_model.layernorm_post",  # llama4
            "visual.merger.ln_q",  # qwen2vl
            "vision_tower.encoder.final_layernorm",  # kimi-vl
        ),

        MODEL_TENSOR.V_MM_INP_PROJ: (
            "multi_modal_projector.mm_input_projection",
        ),

        MODEL_TENSOR.V_MM_INP_NORM: (
            "multi_modal_projector.norm",
            "multi_modal_projector.layer_norm",
            "multi_modal_projector.pre_norm",
            "pre_mm_projector_norm",
        ),

        MODEL_TENSOR.V_MM_SOFT_EMB_NORM: (
            "multi_modal_projector.mm_soft_emb_norm",
        ),

        MODEL_TENSOR.V_RESMPL_POS_EMBD_K: (
            "resampler.pos_embed_k",
        ),

        MODEL_TENSOR.V_RESMPL_ATTN_Q: (
            "resampler.attn.in_proj_q",  # tensor generated from resampler.attn.in_proj
        ),

        MODEL_TENSOR.V_RESMPL_ATTN_K: (
            "resampler.attn.in_proj_k",  # tensor generated from resampler.attn.in_proj
        ),

        MODEL_TENSOR.V_RESMPL_ATTN_V: (
            "resampler.attn.in_proj_v",  # tensor generated from resampler.attn.in_proj
        ),

        MODEL_TENSOR.V_RESMPL_ATTN_OUT: (
            "resampler.attn.out_proj",
        ),

        MODEL_TENSOR.V_RESMPL_KV: (
            "resampler.kv_proj",
        ),

        MODEL_TENSOR.V_RESMPL_POST_NORM: (
            "resampler.ln_post",
        ),

        MODEL_TENSOR.V_RESMPL_KV_NORM: (
            "resampler.ln_kv",
        ),

        MODEL_TENSOR.V_RESMPL_Q_NORM: (
            "resampler.ln_q",
        ),

        MODEL_TENSOR.V_RESMPL_PROJ: (
            "resampler.proj",
        ),

        MODEL_TENSOR.V_RESMPL_QUERY: (
            "resampler.query",
        ),

        MODEL_TENSOR.V_TOK_EMBD_IMG_BREAK: (
            "v.token_embd.img_break",  # for pixtral, this is a generated vector
        ),

        MODEL_TENSOR.V_MM_PATCH_MERGER: (
            "multi_modal_projector.patch_merger.merging_layer",  # mistral small 3.1 - hf
            "patch_merger.merging_layer",  # mistral
        ),

        # audio (mtmd)
        MODEL_TENSOR.A_ENC_EMBD_POS: (
            "audio_tower.embed_positions",  # ultravox
        ),

        MODEL_TENSOR.A_ENC_CONV1D: (
            "audio_tower.conv{bid}",  # ultravox
        ),

        MODEL_TENSOR.A_PRE_NORM: (),

        MODEL_TENSOR.A_POST_NORM: (
            "audio_tower.layer_norm",  # ultravox
            "audio_tower.ln_post",  # qwen2omni
        ),

        MODEL_TENSOR.A_ENC_ATTN_Q: (
            "audio_tower.layers.{bid}.self_attn.q_proj",  # ultravox
        ),

        MODEL_TENSOR.A_ENC_ATTN_K: (
            "audio_tower.layers.{bid}.self_attn.k_proj",  # ultravox
        ),

        MODEL_TENSOR.A_ENC_ATTN_V: (
            "audio_tower.layers.{bid}.self_attn.v_proj",  # ultravox
        ),

        MODEL_TENSOR.A_ENC_INPUT_NORM: (
            "audio_tower.layers.{bid}.self_attn_layer_norm",  # ultravox
        ),

        MODEL_TENSOR.A_ENC_OUTPUT: (
            "audio_tower.layers.{bid}.self_attn.out_proj",  # ultravox
        ),

        MODEL_TENSOR.A_ENC_OUTPUT_NORM: (
            "audio_tower.layers.{bid}.final_layer_norm",  # ultravox
        ),

        MODEL_TENSOR.A_ENC_FFN_UP: (
            "audio_tower.layers.{bid}.fc1",  # ultravox
        ),

        MODEL_TENSOR.A_ENC_FFN_GATE: (),

        MODEL_TENSOR.A_ENC_FFN_DOWN: (
            "audio_tower.layers.{bid}.fc2",  # ultravox
        ),

        # note: some tensors below have an "audio." pseudo-prefix, to prevent conflicts with vision tensors
        # this prefix is added in the conversion code in modify_tensors()
        MODEL_TENSOR.A_MMPROJ: (
            "audio.multi_modal_projector.linear_{bid}",  # ultravox
        ),

        MODEL_TENSOR.A_MMPROJ_FC: (
            "audio.multi_modal_projector.linear",  # qwen2audio
            "audio_tower.proj",  # qwen2omni
        ),

        MODEL_TENSOR.A_MM_NORM_PRE: (
            "audio.multi_modal_projector.ln_pre",  # ultravox
        ),

        MODEL_TENSOR.A_MM_NORM_MID: (
            "audio.multi_modal_projector.ln_mid",  # ultravox
        ),

        # NextN/MTP tensors for GLM4_MOE
        MODEL_TENSOR.NEXTN_EH_PROJ: (
            "model.layers.{bid}.eh_proj",
        ),

        MODEL_TENSOR.NEXTN_EMBED_TOKENS: (
            "model.layers.{bid}.embed_tokens",
        ),

        MODEL_TENSOR.NEXTN_ENORM: (
            "model.layers.{bid}.enorm",
        ),

        MODEL_TENSOR.NEXTN_HNORM: (
            "model.layers.{bid}.hnorm",
        ),

        MODEL_TENSOR.NEXTN_SHARED_HEAD_HEAD: (
            "model.layers.{bid}.shared_head.head",
        ),

        MODEL_TENSOR.NEXTN_SHARED_HEAD_NORM: (
            "model.layers.{bid}.shared_head.norm",
        ),
    }

    # architecture-specific block mappings
    arch_block_mappings_cfg: dict[MODEL_ARCH, dict[MODEL_TENSOR, tuple[str, ...]]] = {
        MODEL_ARCH.ARCTIC: {
            MODEL_TENSOR.FFN_NORM: (
                "model.layers.{bid}.residual_layernorm",
            ),
            MODEL_TENSOR.FFN_NORM_EXP: (
                "model.layers.{bid}.post_attention_layernorm",
            ),
        },
    }

    mapping: dict[str, tuple[MODEL_TENSOR, str]]

    def __init__(self, arch: MODEL_ARCH, n_blocks: int):
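        """Build the flat name lookup for `arch`: both the canonical GGUF name
        and every known checkpoint alias map to (tensor type, gguf name), with
        "{bid}" expanded for each of the `n_blocks` blocks."""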
        self.mapping = {}
        for tensor, keys in self.mappings_cfg.items():
            if tensor not in MODEL_TENSORS[arch]:
                continue
            tensor_name = TENSOR_NAMES[tensor]
            self.mapping[tensor_name] = (tensor, tensor_name)
            for key in keys:
                self.mapping[key] = (tensor, tensor_name)
        if arch in self.arch_block_mappings_cfg:
            self.block_mappings_cfg.update(self.arch_block_mappings_cfg[arch])
        for bid in range(n_blocks):
            for tensor, keys in self.block_mappings_cfg.items():
                if tensor not in MODEL_TENSORS[arch]:
                    continue
                tensor_name = TENSOR_NAMES[tensor].format(bid = bid)
                self.mapping[tensor_name] = (tensor, tensor_name)
                for key in keys:
                    key = key.format(bid = bid)
                    self.mapping[key] = (tensor, tensor_name)

    def get_type_and_name(self, key: str, try_suffixes: Sequence[str] = ()) -> tuple[MODEL_TENSOR, str] | None:
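        """Resolve `key` to (tensor type, gguf name), retrying with each suffix
        in `try_suffixes` stripped; a matched suffix is re-appended to the
        returned name."""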
        result = self.mapping.get(key)
        if result is not None:
            return result
        for suffix in try_suffixes:
            if key.endswith(suffix):
                result = self.mapping.get(key[:-len(suffix)])
                if result is not None:
                    return result[0], result[1] + suffix
        return None

    def get_name(self, key: str, try_suffixes: Sequence[str] = ()) -> str | None:
        result = self.get_type_and_name(key, try_suffixes = try_suffixes)
        if result is None:
            return None
        return result[1]

    def get_type(self, key: str, try_suffixes: Sequence[str] = ()) -> MODEL_TENSOR | None:
        result = self.get_type_and_name(key, try_suffixes = try_suffixes)
        if result is None:
            return None
        return result[0]

    def __getitem__(self, key: str) -> str:
        try:
            return self.mapping[key][1]
        except KeyError:
            raise KeyError(key)

    def __contains__(self, key: str) -> bool:
        return key in self.mapping

    def __repr__(self) -> str:
        return repr(self.mapping)

def get_tensor_name_map(arch: MODEL_ARCH, n_blocks: int) -> TensorNameMap:
    return TensorNameMap(arch, n_blocks)
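
# Illustrative usage sketch (hypothetical values -- the arch and block count
# come from the model being converted; this module is normally imported as
# part of the gguf package rather than run standalone):
#
#     tmap = get_tensor_name_map(MODEL_ARCH.LLAMA, n_blocks=32)
#     name = tmap.get_name("model.layers.0.self_attn.q_proj.weight",
#                          try_suffixes=(".weight", ".bias"))
#     # `name` is the GGUF base name for block 0's attention query projection,
#     # with the matched ".weight" suffix re-appended.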