diff --git a/convert-falcon-hf-to-gguf.py b/convert-falcon-hf-to-gguf.py
index e0da3a04d..b3e190a0f 100644
--- a/convert-falcon-hf-to-gguf.py
+++ b/convert-falcon-hf-to-gguf.py
@@ -250,7 +250,7 @@ for part_name in part_names:
             sys.exit()
 
         n_dims = len(data.shape)
-        data_dtype = data.dtype
+        data_dtype = data.dtype
 
         # if f32 desired, convert any float16 to float32
         if ftype == 0 and data_dtype == np.float16:
diff --git a/convert-llama-7b-pth-to-gguf.py b/convert-llama-7b-pth-to-gguf.py
index 3a606b55d..ab5c80b69 100644
--- a/convert-llama-7b-pth-to-gguf.py
+++ b/convert-llama-7b-pth-to-gguf.py
@@ -170,7 +170,7 @@ if Path(dir_model + "/tokenizer.model").is_file():
 
             print("gguf: get added tokens")
 
             for key in addtokens_json:
-                tokens.append( key.encode("utf-8") )
+                tokens.append( key.encode("utf-8") )
                 scores.append(-1000.0)
                 toktypes.append(4) # user-defined token type
diff --git a/convert-llama-hf-to-gguf.py b/convert-llama-hf-to-gguf.py
index d5b3897c7..f8cfdaa80 100644
--- a/convert-llama-hf-to-gguf.py
+++ b/convert-llama-hf-to-gguf.py
@@ -178,7 +178,7 @@ if Path(dir_model + "/tokenizer.model").is_file():
 
             print("gguf: get added tokens")
 
             for key in addtokens_json:
-                tokens.append( key.encode("utf-8") )
+                tokens.append( key.encode("utf-8") )
                 scores.append(-1000.0)
                 toktypes.append(4) # user-defined token type
@@ -294,7 +294,7 @@ for part_name in part_names:
             sys.exit()
 
         n_dims = len(data.shape)
-        data_dtype = data.dtype
+        data_dtype = data.dtype
 
         # if f32 desired, convert any float16 to float32
         if ftype == 0 and data_dtype == np.float16: