run-org-model.py

#!/usr/bin/env python3
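"""
Run a Hugging Face model on a prompt and dump the logits for the last
token to data/pytorch-<model>.bin (raw float32) and a matching .txt file,
so they can be compared against another implementation of the same model.

Example invocation (paths are illustrative):
    python run-org-model.py --model-path /path/to/model --device cpu
"""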
import argparse
import os
import sys
import importlib

import torch
import numpy as np
from pathlib import Path
from transformers import (
    AutoTokenizer,
    AutoModelForCausalLM,
    AutoModelForImageTextToText,
    AutoConfig,
)

# Add parent directory to path so utils.common can be imported.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
from utils.common import debug_hook


def parse_arguments():
    parser = argparse.ArgumentParser(description="Process model with specified path")
    parser.add_argument("--model-path", "-m", help="Path to the model")
    parser.add_argument("--prompt-file", "-f", help="Optional prompt file", required=False)
    parser.add_argument("--verbose", "-v", action="store_true", help="Enable verbose debug output")
    parser.add_argument("--device", "-d", help="Device to use (cpu, cuda, mps, auto)", default="auto")
    return parser.parse_args()


def load_model_and_tokenizer(model_path, device="auto"):
    print("Loading model and tokenizer using AutoTokenizer:", model_path)
    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
    config = AutoConfig.from_pretrained(model_path, trust_remote_code=True)
    multimodal = False
    full_config = config

    # Determine device_map based on the device argument.
    if device == "cpu":
        device_map = {"": "cpu"}
        print("Forcing CPU usage")
    elif device == "auto":
        device_map = "auto"
    else:
        device_map = {"": device}

    print("Model type:", config.model_type)
    # Multimodal configs nest the language-model settings under text_config;
    # use hasattr since PretrainedConfig does not support "in" membership tests.
    if not hasattr(config, "vocab_size") and hasattr(config, "text_config"):
        config = config.text_config
        multimodal = True

    print("Vocab size:", config.vocab_size)
    print("Hidden size:", config.hidden_size)
    print("Number of layers:", config.num_hidden_layers)
    print("BOS token id:", config.bos_token_id)
    print("EOS token id:", config.eos_token_id)
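
    # UNRELEASED_MODEL_NAME lets this script load a model class that is not
    # yet wired into the Auto* registries: the module path below follows the
    # transformers layout transformers/models/<name>/modular_<name>.py, and
    # the class is assumed to be named <Name>ForCausalLM.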
    unreleased_model_name = os.getenv("UNRELEASED_MODEL_NAME")
    if unreleased_model_name:
        model_name_lower = unreleased_model_name.lower()
        unreleased_module_path = (
            f"transformers.models.{model_name_lower}.modular_{model_name_lower}"
        )
        class_name = f"{unreleased_model_name}ForCausalLM"
        print(f"Importing unreleased model module: {unreleased_module_path}")
        try:
            model_class = getattr(importlib.import_module(unreleased_module_path), class_name)
            model = model_class.from_pretrained(
                model_path,
                device_map=device_map,
                offload_folder="offload",
                trust_remote_code=True,
                config=config,
            )
        except (ImportError, AttributeError) as e:
            print(f"Failed to import or load model: {e}")
            sys.exit(1)
    else:
        if multimodal:
            # Multimodal models need the full (outer) config, not just text_config.
            model = AutoModelForImageTextToText.from_pretrained(
                model_path,
                device_map=device_map,
                offload_folder="offload",
                trust_remote_code=True,
                config=full_config,
            )
        else:
            model = AutoModelForCausalLM.from_pretrained(
                model_path,
                device_map=device_map,
                offload_folder="offload",
                trust_remote_code=True,
                config=config,
            )

    print(f"Model class: {model.__class__.__name__}")
    return model, tokenizer, config


def enable_torch_debugging(model):
    # Register debug_hook (from utils.common) as a forward hook on every
    # leaf module so intermediate outputs can be inspected.
    for name, module in model.named_modules():
        if len(list(module.children())) == 0:  # only leaf modules
            module.register_forward_hook(debug_hook(name))
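

# Prompt selection precedence: --prompt-file, then the MODEL_TESTING_PROMPT
# environment variable, then a built-in default.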
def get_prompt(args):
    if args.prompt_file:
        with open(args.prompt_file, encoding='utf-8') as f:
            return f.read()
    elif os.getenv("MODEL_TESTING_PROMPT"):
        return os.getenv("MODEL_TESTING_PROMPT")
    else:
        return "Hello, my name is"


def main():
    args = parse_arguments()
    # The MODEL_PATH environment variable takes precedence over --model-path.
    model_path = os.environ.get("MODEL_PATH", args.model_path)
    if model_path is None:
        print("Error: Model path must be specified either via --model-path argument or MODEL_PATH environment variable")
        sys.exit(1)

    model, tokenizer, config = load_model_and_tokenizer(model_path, args.device)
    if args.verbose:
        enable_torch_debugging(model)

    model_name = os.path.basename(model_path)
    # Use the first model parameter to find out which device the model is on.
    device = next(model.parameters()).device

    prompt = get_prompt(args)
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)
    print(f"Input tokens: {input_ids}")
    print(f"Input text: {repr(prompt)}")
    print(f"Tokenized: {tokenizer.convert_ids_to_tokens(input_ids[0])}")
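
    # Long prompts are prefilled in fixed-size chunks: each forward pass
    # returns past_key_values (the KV cache), which is fed back in so the
    # next chunk attends to everything processed so far.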
    batch_size = 512
    with torch.no_grad():
        past = None
        outputs = None
        for i in range(0, input_ids.size(1), batch_size):
            end = min(i + batch_size, input_ids.size(1))
            print(f"Processing chunk with tokens {i} to {end}")
            chunk = input_ids[:, i:end]
            outputs = model(chunk.to(model.device), past_key_values=past, use_cache=True)
            past = outputs.past_key_values

    logits = outputs.logits  # type: ignore
    # Extract logits for the last token (next-token prediction).
    last_logits = logits[0, -1, :].float().cpu().numpy()
    print(f"Logits shape: {logits.shape}")
    print(f"Last token logits shape: {last_logits.shape}")
    print(f"Vocab size: {len(last_logits)}")
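
    # The binary file holds the raw float32 logits; the text file lists one
    # "<index>: <logit>" pair per line for quick manual inspection.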
    data_dir = Path("data")
    data_dir.mkdir(exist_ok=True)
    bin_filename = data_dir / f"pytorch-{model_name}.bin"
    txt_filename = data_dir / f"pytorch-{model_name}.txt"

    # Save to file for comparison
    last_logits.astype(np.float32).tofile(bin_filename)

    # Also save as text file for easy inspection
    with open(txt_filename, "w") as f:
        for i, logit in enumerate(last_logits):
            f.write(f"{i}: {logit:.6f}\n")
    # Print some sample logits for quick verification
    print(f"First 10 logits: {last_logits[:10]}")
    print(f"Last 10 logits: {last_logits[-10:]}")

    # Show top 5 predicted tokens
    top_indices = np.argsort(last_logits)[-5:][::-1]
    print("Top 5 predictions:")
    for idx in top_indices:
        token = tokenizer.decode([idx])
        print(f"  Token {idx} ({repr(token)}): {last_logits[idx]:.6f}")

    print(f"Saved bin logits to: {bin_filename}")
    print(f"Saved txt logits to: {txt_filename}")


if __name__ == "__main__":
    main()