From 2b2646931bd2a2eb3e21c6f3733cc0e090b2e24b Mon Sep 17 00:00:00 2001
From: Tom Jobbins <784313+TheBloke@users.noreply.github.com>
Date: Tue, 16 May 2023 23:04:35 +0100
Subject: [PATCH] convert.py: Support models which are stored in a single
 pytorch_model.bin (#1469)

* Support models in a single pytorch_model.bin

* Remove spurious line with typo
---
 convert.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/convert.py b/convert.py
index 8f4f0399e..ece5a0266 100644
--- a/convert.py
+++ b/convert.py
@@ -121,7 +121,6 @@ def make_tensors_list() -> List[str]:
             f'layers.{i}.feed_forward.w1.weight',
             f'layers.{i}.feed_forward.w2.weight',
             f'layers.{i}.feed_forward.w3.weight',
-            f'layers.{i}.atttention_norm.weight',
             f'layers.{i}.ffn_norm.weight',
         ]
     return ret
@@ -1055,7 +1054,7 @@ def load_some_model(path: Path) -> ModelPlus:
         files = list(path.glob("model-00001-of-*.safetensors"))
         if not files:
             # Try the PyTorch patterns too, with lower priority
-            globs = ["consolidated.00.pth", "pytorch_model-00001-of-*.bin", "*.pt"]
+            globs = ["consolidated.00.pth", "pytorch_model-00001-of-*.bin", "*.pt", "pytorch_model.bin" ]
             files = [file for glob in globs for file in path.glob(glob)]
         if not files:
             # Try GGML too, but with lower priority, since if both a non-GGML
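
For review context, the following is a minimal standalone sketch of the glob-priority lookup that the second hunk changes in load_some_model(): safetensors shards are tried first, then the PyTorch patterns, with the new single-file "pytorch_model.bin" pattern appended as the last of the PyTorch globs. The helper name find_model_file and the command-line wrapper are illustrative only and are not part of convert.py.

# Sketch of the patched lookup order; find_model_file is a hypothetical
# stand-in for the directory-scanning portion of load_some_model().
from pathlib import Path
from typing import List, Optional


def find_model_file(path: Path) -> Optional[Path]:
    # Prefer sharded safetensors files.
    files: List[Path] = list(path.glob("model-00001-of-*.safetensors"))
    if not files:
        # Fall back to the PyTorch patterns; the patch adds the single-file
        # "pytorch_model.bin" layout used by many Hugging Face repositories.
        globs = ["consolidated.00.pth", "pytorch_model-00001-of-*.bin", "*.pt", "pytorch_model.bin"]
        files = [file for glob in globs for file in path.glob(glob)]
    # Return the first match, or None if the directory holds no known format.
    return files[0] if files else None


if __name__ == "__main__":
    import sys

    # Example: a directory containing only pytorch_model.bin now resolves to
    # that file instead of producing no matches.
    target = Path(sys.argv[1]) if len(sys.argv) > 1 else Path(".")
    print(find_model_file(target))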