llava_surgery_v2.py

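"""Split a LLaVA v1.5+ checkpoint directory into parts that can be converted separately:

  - llava.clip      : the vision tower (CLIP) tensors, stripped out of the
                      language-model checkpoints with -C / --clean-vision-tower
  - llava.projector : the multimodal projector (and image_newline) tensors

Both PyTorch (*.bin) and SafeTensors (*.safetensors) shards are handled.
"""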
import argparse
import glob
import os
import torch
from safetensors import safe_open
from safetensors.torch import save_file
from typing import Any, ContextManager, cast


# Function to determine if file is a SafeTensor file
def is_safetensor_file(file_path):
    return file_path.endswith('.safetensors')

# Unified loading function
def load_model(file_path):
    if is_safetensor_file(file_path):
        tensors = {}
        with cast(ContextManager[Any], safe_open(file_path, framework="pt", device="cpu")) as f:
            for key in f.keys():
                tensors[key] = f.get_tensor(key).clone()
                # output shape
                print(f"{key} : {tensors[key].shape}")
        return tensors, 'safetensor'
    else:
        return torch.load(file_path, map_location=torch.device('cpu')), 'pytorch'

# Unified saving function
def save_model(model, file_path, file_type):
    if file_type == 'safetensor':
        # safe_save(model, file_path)
        save_file(model, file_path)
    else:
        torch.save(model, file_path)

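# Minimal usage sketch (illustrative; "model.safetensors" is a placeholder path):
#   tensors, file_type = load_model("model.safetensors")  # -> (dict, 'safetensor')
#   save_model(tensors, "model.safetensors", file_type)   # writes it back unchanged
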
# Adapted function to clean vision tower from checkpoint
def clean_vision_tower_from_checkpoint(checkpoint_path):
    checkpoint, file_type = load_model(checkpoint_path)
    # file_type = 'pytorch'
    model_path = os.path.dirname(checkpoint_path)
    print(f"Searching for vision tower tensors in {checkpoint_path}")
    clip_tensors = [k for k, v in checkpoint.items() if (k.startswith("model.vision_tower") or k.startswith("vit."))]

    if len(clip_tensors) > 0:
        print(f"Found {len(clip_tensors)} tensors to extract from {checkpoint_path}")
        # Adapted for file type
        clip_path = os.path.join(model_path, "llava.clip")

        if os.path.exists(clip_path):
            print(f"Loading existing llava.clip from {clip_path}")
            existing_clip, _ = load_model(clip_path)
        else:
            print(f"Creating new llava.clip at {clip_path}")
            existing_clip = {}

        # Update existing_clip with new tensors, avoid duplicates
        for name in clip_tensors:
            simple_name = name[name.index('vision_model.'):] if 'vision_model.' in name else name
            print(f"Adding {simple_name} to llava.clip")
            if simple_name not in existing_clip:
                existing_clip[simple_name] = checkpoint[name]

        # Save the updated clip tensors back to llava.clip
        save_model(existing_clip, clip_path, 'pytorch')

        # Remove the extracted tensors from the original checkpoint and save it
        # back, so the cleaning actually persists on disk
        for name in clip_tensors:
            del checkpoint[name]
        save_model(checkpoint, checkpoint_path, file_type)
        return True
    return False

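# Key renaming example (illustrative, based on common LLaVA checkpoint naming):
#   "model.vision_tower.vision_tower.vision_model.embeddings.class_embedding"
# is stored in llava.clip under the suffix starting at 'vision_model.':
#   "vision_model.embeddings.class_embedding"
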
def find_relevant_checkpoints(checkpoint_paths, newline_criteria, projector):
    # Scans the paths in the order given; keeps the first checkpoint that matches
    # newline_criteria and the last one that matches the projector criteria
    newline_checkpoint_path = None
    projector_checkpoint_path = None

    for path in checkpoint_paths:
        checkpoint, _ = load_model(path)
        if newline_criteria(checkpoint) and newline_checkpoint_path is None:
            newline_checkpoint_path = path
        if projector(checkpoint):
            projector_checkpoint_path = path

    return newline_checkpoint_path, projector_checkpoint_path

def newline_criteria(checkpoint):
    return any(k.startswith("model.image_newline") for k in checkpoint.keys())


def proj_criteria(checkpoint):
    return any(k.startswith("model.mm_projector") or k.startswith("vision_proj.") for k in checkpoint.keys())

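# Illustrative keys each criterion matches (typical LLaVA-1.6-style names; the
# exact suffixes vary by model):
#   newline_criteria -> "model.image_newline"
#   proj_criteria    -> "model.mm_projector.0.weight", ... or "vision_proj.*"
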
# Command-line interface setup
ap = argparse.ArgumentParser()
ap.add_argument("-m", "--model", required=True, help="Path to LLaVA v1.5+ model")
ap.add_argument("-C", "--clean-vision-tower", action="store_true", help="Remove any vision tower from the model files")
args = ap.parse_args()

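# Example invocation (the model directory is a placeholder):
#   python llava_surgery_v2.py -C -m ./llava-v1.6-vicuna-7b
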
if args.clean_vision_tower:
    # Generalized to handle both PyTorch and SafeTensors models
    model_files = sorted(glob.glob(f"{args.model}/*"), key=os.path.getmtime, reverse=True)
    # checkpoint_paths = [path for path in model_files if (path.endswith('.bin') and path.startswith('pytorch')) or (path.endswith('.safetensors') and path.startswith('model'))]
    # Match shard filenames such as pytorch_model-00001-of-00003.bin or
    # model-00001-of-00002.safetensors, regardless of the directory prefix
    checkpoint_paths = [path for path in model_files if (path.endswith('.bin') and 'pytorch' in path.split('/')[-1].split('\\')[-1]) or (path.endswith('.safetensors') and 'model' in path.split('/')[-1].split('\\')[-1])]
    for projector_checkpoint_path in checkpoint_paths:
        print(f"Cleaning {projector_checkpoint_path}")
        if not clean_vision_tower_from_checkpoint(projector_checkpoint_path):
            print(f"No vision tower found in {projector_checkpoint_path}")
            # we break once none is found, so far all models append them at the end
            # break
    print("Done! All vision tower tensors are removed from the model files and stored in the llava.clip file.")

# Now we look for the projector in the last checkpoint
model_files = sorted(glob.glob(f"{args.model}/*"), key=os.path.getmtime, reverse=True)
checkpoint_paths = [path for path in model_files if (path.endswith('.bin') and 'pytorch' in path.split('/')[-1].split('\\')[-1]) or (path.endswith('.safetensors') and 'model' in path.split('/')[-1].split('\\')[-1])]

# last_checkpoint_path = checkpoint_paths[0]
# first_checkpoint_path = checkpoint_paths[-1]
newline_checkpoint_path, projector_checkpoint_path = find_relevant_checkpoints(checkpoint_paths, newline_criteria, proj_criteria)

print(f"Taking projector from {projector_checkpoint_path}")
first_mm_tensors = []
first_checkpoint = None
if newline_checkpoint_path is not None:
    print(f"Taking newline from {newline_checkpoint_path}")
    first_checkpoint, file_type = load_model(newline_checkpoint_path)
    first_mm_tensors = [k for k, v in first_checkpoint.items() if k.startswith("model.image_newline")]

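# "model.image_newline" holds the learned separator embedding that LLaVA-1.6-style
# models insert between image patch rows (assumption based on common LLaVA naming)
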
# Load the checkpoint
mm_tensors = []
last_checkpoint = None
if projector_checkpoint_path is not None:
    last_checkpoint, file_type = load_model(projector_checkpoint_path)
    mm_tensors = [k for k, v in last_checkpoint.items() if k.startswith("model.mm_projector") or k.startswith("vision_proj.")]

if len(mm_tensors) == 0:
    if last_checkpoint is not None:
        for k, v in last_checkpoint.items():
            print(k)
    print(f"Found {len(mm_tensors)} tensors to extract out of {len(last_checkpoint) if last_checkpoint is not None else 0} tensors.")
    print("No tensors found. Is this a LLaVA model?")
    exit()

print(f"Found {len(mm_tensors)} tensors to extract.")
print(f"Found additional {len(first_mm_tensors)} tensors to extract.")

# Collect the projector (and image_newline) tensors, upcast to float32
projector = {}
for name in mm_tensors:
    assert last_checkpoint is not None
    projector[name] = last_checkpoint[name].float()
for name in first_mm_tensors:
    assert first_checkpoint is not None
    projector[name] = first_checkpoint[name].float()

if len(projector) > 0:
    save_model(projector, f"{args.model}/llava.projector", 'pytorch')

print("Done!")
print(f"Now you can convert {args.model} to a regular LLaMA GGUF file.")
print(f"Also, use {args.model}/llava.projector to prepare a llava-encoder.gguf file.")
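
# Typical end-to-end flow (illustrative; the exact converter script names depend
# on your llama.cpp checkout):
#   1. python llava_surgery_v2.py -C -m ./llava-v1.6-vicuna-7b
#   2. convert the remaining language-model weights in the model directory to GGUF
#   3. use llava.clip and llava.projector to build the llava-encoder.gguf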