From 6fbd43221167bf96112f899daf22c127b282cbcf Mon Sep 17 00:00:00 2001
From: Brian
Date: Sun, 5 May 2024 15:07:48 +1000
Subject: [PATCH] py : logging and flake8 suppression refactoring (#7081)

Set convert-hf-to-gguf-update.py as executable and add
logging.basicConfig() to convert-lora-to-ggml.py. Also add noqa tags to
the print() calls in gen-unicode-data.py and test-tokenizer-0.py.
---
 .flake8                      | 15 ++++++++++++++-
 convert-hf-to-gguf-update.py |  2 ++
 convert-lora-to-ggml.py      |  1 +
 scripts/gen-unicode-data.py  | 14 +++++++-------
 tests/test-tokenizer-0.py    | 10 +++++-----
 5 files changed, 29 insertions(+), 13 deletions(-)
 mode change 100644 => 100755 convert-hf-to-gguf-update.py

diff --git a/.flake8 b/.flake8
index bc41c2290..d64c2564a 100644
--- a/.flake8
+++ b/.flake8
@@ -1,4 +1,17 @@
 [flake8]
 max-line-length = 125
 ignore = E203,E211,E221,E225,E231,E241,E251,E261,E266,E501,E701,E704,W503
-exclude = examples/*,examples/*/**,*/**/__init__.py,scripts/gen-unicode-data.py,tests/test-tokenizer-0.py
+exclude =
+    # Do not traverse examples
+    examples,
+    # Do not include package initializers
+    __init__.py,
+    # No need to traverse our git directory
+    .git,
+    # There's no value in checking cache directories
+    __pycache__,
+    # No need to include the build path
+    build,
+    # This contains builds that we don't want to check
+    dist # This is generated with `python build .` for package releases
+# max-complexity = 10
diff --git a/convert-hf-to-gguf-update.py b/convert-hf-to-gguf-update.py
old mode 100644
new mode 100755
index 917a4469d..b41a9290e
--- a/convert-hf-to-gguf-update.py
+++ b/convert-hf-to-gguf-update.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python3
+
 # This script downloads the tokenizer models of the specified models from Huggingface and
 # generates the get_vocab_base_pre() function for convert-hf-to-gguf.py
 #
diff --git a/convert-lora-to-ggml.py b/convert-lora-to-ggml.py
index 39536feb9..f09fa85fe 100755
--- a/convert-lora-to-ggml.py
+++ b/convert-lora-to-ggml.py
@@ -16,6 +16,7 @@ if 'NO_LOCAL_GGUF' not in os.environ:
     sys.path.insert(1, str(Path(__file__).parent / 'gguf-py' / 'gguf'))
 import gguf
 
+logging.basicConfig(level=logging.DEBUG)
 logger = logging.getLogger("lora-to-gguf")
 
 NUMPY_TYPE_TO_FTYPE: dict[str, int] = {"float32": 0, "float16": 1}
diff --git a/scripts/gen-unicode-data.py b/scripts/gen-unicode-data.py
index d49cbf2a0..977e65613 100644
--- a/scripts/gen-unicode-data.py
+++ b/scripts/gen-unicode-data.py
@@ -41,20 +41,20 @@ def get_matches(regex_expr):
 
 
 def print_cat(cat, ranges):
-    print("const std::vector<std::pair<uint32_t, uint32_t>> unicode_ranges_{} = {{".format(cat))
+    print("const std::vector<std::pair<uint32_t, uint32_t>> unicode_ranges_{} = {{".format(cat)) # noqa: NP100
     cnt = 0
     for start, end in ranges:
         if cnt % 4 != 0:
-            print(" ", end="")
-        print("{{0x{:08X}, 0x{:08X}}},".format(start, end), end="")
+            print(" ", end="") # noqa: NP100
+        print("{{0x{:08X}, 0x{:08X}}},".format(start, end), end="") # noqa: NP100
         if cnt % 4 == 3:
-            print("")
+            print("") # noqa: NP100
         cnt += 1
 
     if cnt % 4 != 0:
-        print("")
-    print("};")
-    print("")
+        print("") # noqa: NP100
+    print("};") # noqa: NP100
+    print("") # noqa: NP100
 
 
 print_cat("number", get_matches(r'\p{N}'))
diff --git a/tests/test-tokenizer-0.py b/tests/test-tokenizer-0.py
index 8e7638e42..cd760d1ce 100644
--- a/tests/test-tokenizer-0.py
+++ b/tests/test-tokenizer-0.py
@@ -13,7 +13,7 @@ fname_tok = args.fname_tok
 
 tokenizer = AutoTokenizer.from_pretrained(dir_tokenizer)
 
-print('tokenizing file: ', fname_tok)
+print('tokenizing file: ', fname_tok) # noqa: NP100
 fname_out = fname_tok + '.tok'
 with open(fname_tok, 'r', encoding='utf-8') as f:
     lines = f.readlines()
@@ -21,7 +21,7 @@ with open(fname_tok, 'r', encoding='utf-8') as f:
     t_start = time.time()
     res = tokenizer.encode(s, add_special_tokens=False)
     t_end = time.time()
-    print('\nmain : tokenized in', "{:.3f}".format(1000.0 * (t_end - t_start)), 'ms (py)')
+    print('\nmain : tokenized in', "{:.3f}".format(1000.0 * (t_end - t_start)), 'ms (py)') # noqa: NP100
     with open(fname_out, 'w', encoding='utf-8') as f:
         for x in res:
             # LLaMA v3 for some reason strips the space for these tokens (and others)
@@ -41,6 +41,6 @@ with open(fname_tok, 'r', encoding='utf-8') as f:
             # f.write(str(x) + ' \'' + tokenizer.decode(x) + '\'\n')
             # f.write(str(x) + ' \'' + tokenizer.decode(x).strip() + '\'\n')
             f.write(str(x) + '\n')
-    print('len(res): ', len(res))
-    print('len(lines): ', len(lines))
-print('results written to: ', fname_out)
+    print('len(res): ', len(res)) # noqa: NP100
+    print('len(lines): ', len(lines)) # noqa: NP100
+print('results written to: ', fname_out) # noqa: NP100