#!/usr/bin/env python3
import argparse
import os
import sys
import importlib

import torch
import numpy as np
from transformers import AutoTokenizer, AutoModelForCausalLM, AutoModelForImageTextToText, AutoConfig

# Add parent directory to path for imports
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))

from utils.common import debug_hook, save_output_data


def parse_arguments():
    parser = argparse.ArgumentParser(description="Process model with specified path")
    parser.add_argument("--model-path", "-m", help="Path to the model")
    parser.add_argument("--prompt-file", "-f", help="Optional prompt file", required=False)
    parser.add_argument("--verbose", "-v", action="store_true", help="Enable verbose debug output")
    parser.add_argument("--device", "-d", help="Device to use (cpu, cuda, mps, auto)", default="auto")
    return parser.parse_args()


def load_model_and_tokenizer(model_path, device="auto"):
    print("Loading model and tokenizer using AutoTokenizer:", model_path)
    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
    config = AutoConfig.from_pretrained(model_path, trust_remote_code=True)
    multimodal = False
    full_config = config

    # Determine device_map based on device argument
    if device == "cpu":
        device_map = {"": "cpu"}
        print("Forcing CPU usage")
    elif device == "auto":
        device_map = "auto"
    else:
        device_map = {"": device}
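    # Note: device_map="auto" lets Transformers/accelerate spread layers across
    # the available devices, while a {"": <device>} map pins the whole model to
    # a single device.
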
    print("Model type: ", config.model_type)
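
    # Multimodal checkpoints nest the language-model settings under
    # config.text_config; unwrap it so the prints below read the right fields.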
    if not hasattr(config, "vocab_size") and hasattr(config, "text_config"):
        config = config.text_config
        multimodal = True
    print("Vocab size: ", config.vocab_size)
    print("Hidden size: ", config.hidden_size)
    print("Number of layers: ", config.num_hidden_layers)
    print("BOS token id: ", config.bos_token_id)
    print("EOS token id: ", config.eos_token_id)

    unreleased_model_name = os.getenv("UNRELEASED_MODEL_NAME")
    if unreleased_model_name:
        model_name_lower = unreleased_model_name.lower()
        unreleased_module_path = (
            f"transformers.models.{model_name_lower}.modular_{model_name_lower}"
        )
        class_name = f"{unreleased_model_name}ForCausalLM"
        print(f"Importing unreleased model module: {unreleased_module_path}")
        try:
            model_class = getattr(importlib.import_module(unreleased_module_path), class_name)
            model = model_class.from_pretrained(
                model_path,
                device_map=device_map,
                offload_folder="offload",
                trust_remote_code=True,
                config=config
            )
        except (ImportError, AttributeError) as e:
            print(f"Failed to import or load model: {e}")
            sys.exit(1)
    else:
        if multimodal:
            model = AutoModelForImageTextToText.from_pretrained(
                model_path,
                device_map=device_map,
                offload_folder="offload",
                trust_remote_code=True,
                config=full_config
            )
        else:
            model = AutoModelForCausalLM.from_pretrained(
                model_path,
                device_map=device_map,
                offload_folder="offload",
                trust_remote_code=True,
                config=config
            )

    print(f"Model class: {model.__class__.__name__}")
    return model, tokenizer, config


def enable_torch_debugging(model):
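    # Register a forward hook on every leaf module; debug_hook (from
    # utils.common) is assumed to return a hook callable that logs the
    # module's output for --verbose runs.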
    for name, module in model.named_modules():
        if len(list(module.children())) == 0:  # only leaf modules
            module.register_forward_hook(debug_hook(name))


def get_prompt(args):
    if args.prompt_file:
        with open(args.prompt_file, encoding='utf-8') as f:
            return f.read()
    elif os.getenv("MODEL_TESTING_PROMPT"):
        return os.getenv("MODEL_TESTING_PROMPT")
    else:
        return "Hello, my name is"


def main():
    args = parse_arguments()

    model_path = os.environ.get("MODEL_PATH", args.model_path)
    if model_path is None:
        print("Error: Model path must be specified either via --model-path argument or MODEL_PATH environment variable")
        sys.exit(1)

    model, tokenizer, config = load_model_and_tokenizer(model_path, args.device)
    if args.verbose:
        enable_torch_debugging(model)

    model_name = os.path.basename(model_path)

    # Use the first model parameter to determine which device the model is on.
    device = next(model.parameters()).device

    prompt = get_prompt(args)
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)
    token_ids = input_ids[0].cpu().tolist()

    print(f"Input tokens: {input_ids}")
    print(f"Input text: {repr(prompt)}")
    print(f"Tokenized: {tokenizer.convert_ids_to_tokens(input_ids[0])}")

    batch_size = 512
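    # Prefill the prompt in fixed-size chunks, threading past_key_values (the
    # KV cache) between chunks so long prompts avoid one huge forward pass.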
    with torch.no_grad():
        past = None
        outputs = None
        for i in range(0, input_ids.size(1), batch_size):
            end = min(i + batch_size, input_ids.size(1))
            print(f"Processing chunk with tokens {i} to {end}")
            chunk = input_ids[:, i:end]
            outputs = model(chunk.to(model.device), past_key_values=past, use_cache=True)
            past = outputs.past_key_values

    logits = outputs.logits  # type: ignore

    # Extract logits for the last token (next token prediction)
    last_logits = logits[0, -1, :].float().cpu().numpy()
    print(f"Logits shape: {logits.shape}")
    print(f"Last token logits shape: {last_logits.shape}")
    print(f"Vocab size: {len(last_logits)}")

    # Print some sample logits for quick verification
    print(f"First 10 logits: {last_logits[:10]}")
    print(f"Last 10 logits: {last_logits[-10:]}")
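
    # np.argsort sorts ascending, so the last five indices hold the largest
    # logits; reversing them yields a descending top-5.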
    # Show top 5 predicted tokens
    top_indices = np.argsort(last_logits)[-5:][::-1]
    print("Top 5 predictions:")
    for idx in top_indices:
        token = tokenizer.decode([int(idx)])
        print(f"  Token {idx} ({repr(token)}): {last_logits[idx]:.6f}")

    save_output_data(last_logits, token_ids, prompt, model_name)


if __name__ == "__main__":
    main()
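
# Example invocations (a sketch; the flags match parse_arguments above, and
# MODEL_PATH takes precedence over --model-path when both are set):
#   python run-org-model.py --model-path /path/to/model --device cpu
#   MODEL_PATH=/path/to/model python run-org-model.py --verbose
#   UNRELEASED_MODEL_NAME=MyModel python run-org-model.py -m /path/to/model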