@@ -25,9 +25,6 @@ if len(clip_tensors) > 0:
     clip = {name.replace("vision_tower.vision_tower.", ""): checkpoint[name].float() for name in clip_tensors}
     torch.save(clip, f"{args.model}/llava.clip")
 
-    # remove these tensors
-    for name in clip_tensors:
-        del checkpoint[name]
 
     # added tokens should be removed to be able to convert Mistral models
     if os.path.exists(f"{args.model}/added_tokens.json"):
@@ -35,7 +32,6 @@ if len(clip_tensors) > 0:
             f.write("{}\n")
 
-    torch.save(checkpoint, path)
 
 print("Done!")
 print(f"Now you can convert {args.model} to a regular LLaMA GGUF file.")
 
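For reference, a minimal sketch of the surgery step as it would read after this patch. The argument parsing, the shard glob, and the "model.vision_tower" prefix filter are assumptions reconstructed around the diff context (the replace() call suggests that prefix); they are not part of this change itself:

import argparse
import glob
import os

import torch

ap = argparse.ArgumentParser()
ap.add_argument("-m", "--model", help="Path to the LLaVA model directory")  # assumed flag name
args = ap.parse_args()

# Assumption: the vision-tower weights live in the last pytorch_model*.bin shard.
path = sorted(glob.glob(f"{args.model}/pytorch_model*.bin"))[-1]
checkpoint = torch.load(path)

# Split the vision-tower tensors out into a separate file; the prefix filter
# below is an assumption inferred from the replace() call in the diff above.
clip_tensors = [k for k in checkpoint.keys() if k.startswith("model.vision_tower")]
if len(clip_tensors) > 0:
    clip = {name.replace("vision_tower.vision_tower.", ""): checkpoint[name].float() for name in clip_tensors}
    torch.save(clip, f"{args.model}/llava.clip")

    # After this patch, the extracted tensors are no longer deleted from
    # `checkpoint` and the checkpoint is no longer re-saved to `path`, so
    # the original model files on disk stay untouched.

    # added tokens should be removed to be able to convert Mistral models
    if os.path.exists(f"{args.model}/added_tokens.json"):
        with open(f"{args.model}/added_tokens.json", "w") as f:
            f.write("{}\n")

print("Done!")
print(f"Now you can convert {args.model} to a regular LLaMA GGUF file.")

The net behavioral change is that the script now only emits llava.clip as a side output instead of also rewriting the checkpoint shard in place.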