fc0c8d286a
This commit updates the surgery script so that it no longer removes the tensors from the model file. For this to work, the `--skip-unknown` flag is added to the convert.py invocation in README.md. The motivation for this change is that the surgery script currently removes the projector tensors from the model file; if the model was checked out from a repository, the model file will have been modified and has to be checked out again to undo this effect. If this can be avoided, I think that would be preferable. I did not make this change for BakLLaVA models, as I am not sure how that part works.
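Since the projector tensors now stay in the checkpoint, the conversion step has to skip tensor names it does not recognize. A sketch of the updated README invocation, assuming a llava-v1.5-7b checkout one directory up (the path is illustrative):

    python ./convert.py ../llava-v1.5-7b --skip-unknown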
import argparse
import glob
import os

import torch


ap = argparse.ArgumentParser()
ap.add_argument("-m", "--model", help="Path to LLaVA v1.5 model")
args = ap.parse_args()

# find the model part that includes the multimodal projector weights
path = sorted(glob.glob(f"{args.model}/pytorch_model*.bin"))[-1]
checkpoint = torch.load(path)
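# (Illustrative note: sharded Hugging Face checkpoints are typically named
# "pytorch_model-00001-of-00002.bin" and so on, so sorting and taking the
# last entry picks the final shard, which is where the projector weights
# end up for LLaVA v1.5.)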

# get a list of mm tensor names
mm_tensors = [k for k, v in checkpoint.items() if k.startswith("model.mm_projector")]

# store these tensors in a new dictionary and torch.save them
projector = {name: checkpoint[name].float() for name in mm_tensors}
torch.save(projector, f"{args.model}/llava.projector")
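# (Illustrative note: in LLaVA v1.5 the projector is a small MLP, so the keys
# collected above are typically "model.mm_projector.0.weight",
# "model.mm_projector.0.bias", "model.mm_projector.2.weight" and
# "model.mm_projector.2.bias"; the script does not depend on these exact names.)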

# BakLLaVA models contain CLIP tensors in the same checkpoint
clip_tensors = [k for k, v in checkpoint.items() if k.startswith("model.vision_tower")]
if len(clip_tensors) > 0:
    # strip the nested "vision_tower.vision_tower." prefix from the saved keys
    clip = {name.replace("vision_tower.vision_tower.", ""): checkpoint[name].float() for name in clip_tensors}
    torch.save(clip, f"{args.model}/llava.clip")

    # remove these tensors so the remaining checkpoint converts as a plain LLaMA model
    for name in clip_tensors:
        del checkpoint[name]
    # added tokens should be removed to be able to convert Mistral models
    if os.path.exists(f"{args.model}/added_tokens.json"):
        with open(f"{args.model}/added_tokens.json", "w") as f:
            f.write("{}\n")  # overwrite with an empty JSON object

    torch.save(checkpoint, path)
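    # (Note: the original shard is rewritten only inside this BakLLaVA branch;
    # for plain LLaVA models the checkpoint file is left untouched, per the
    # commit message above.)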
print("Done!")
|
|
print(f"Now you can convert {args.model} to a regular LLaMA GGUF file.")
|
|
print(f"Also, use {args.model}/llava.projector to prepare a llava-encoder.gguf file.")
|
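A typical end-to-end use of the script, assuming it lives at examples/llava/llava-surgery.py and the model checkout sits one directory up (both paths illustrative):

    python ./examples/llava/llava-surgery.py -m ../llava-v1.5-7b

This leaves llava.projector (and llava.clip, for BakLLaVA models) in the model directory; the LLaMA part can then be converted with the --skip-unknown invocation shown above.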