From b9bd1d014113b7498f04ad4d28e6021d5f4cddad Mon Sep 17 00:00:00 2001
From: Oleksandr Nikitin
Date: Sun, 12 Mar 2023 14:16:33 +0200
Subject: [PATCH] python/pytorch compat notes (#44)

---
 README.md              | 1 +
 convert-pth-to-ggml.py | 1 +
 2 files changed, 2 insertions(+)

diff --git a/README.md b/README.md
index 343786206..1c80d8a33 100644
--- a/README.md
+++ b/README.md
@@ -136,6 +136,7 @@ ls ./models
 65B 30B 13B 7B tokenizer_checklist.chk tokenizer.model

 # install Python dependencies
+# preferred versions: python 3.10 (not 3.11), torch 1.13.1+
 python3 -m pip install torch numpy sentencepiece

 # convert the 7B model to ggml FP16 format
diff --git a/convert-pth-to-ggml.py b/convert-pth-to-ggml.py
index 98693e305..ef50fc65a 100644
--- a/convert-pth-to-ggml.py
+++ b/convert-pth-to-ggml.py
@@ -86,6 +86,7 @@ for p in range(n_parts):
     if (p > 0):
         fname_out = sys.argv[1] + "/ggml-model-" + ftype_str[ftype] + ".bin" + "." + str(p)

+    # weights_only requires torch 1.13.1, remove this param or update if you get an "invalid keyword argument" error
     model = torch.load(fname_model, map_location="cpu", weights_only=True)

     fout = open(fname_out, "wb")
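
Note (outside the patch, for context): the weights_only= keyword was added to torch.load() in PyTorch 1.13, so older installs raise a TypeError about an unexpected keyword argument when the converter script runs. Below is a minimal, hypothetical sketch of a version-tolerant loader; load_checkpoint is an illustrative name and is not part of convert-pth-to-ggml.py.

    import torch

    def load_checkpoint(fname_model):
        # Prefer the safer load path available on torch >= 1.13
        try:
            return torch.load(fname_model, map_location="cpu", weights_only=True)
        except TypeError:
            # Older torch rejects the keyword; fall back to the plain load
            # the script used before this change.
            return torch.load(fname_model, map_location="cpu")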