From 148ec970b62c3c5ae0a8bfdaad2fc237aaae350d Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Fri, 5 Jul 2024 10:15:36 +0300
Subject: [PATCH] convert : remove AWQ remnants (#8320)

---
 convert_hf_to_gguf.py | 19 +------------------
 1 file changed, 1 insertion(+), 18 deletions(-)

diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py
index ed5490593..455eea883 100755
--- a/convert_hf_to_gguf.py
+++ b/convert_hf_to_gguf.py
@@ -2445,7 +2445,7 @@ class Gemma2Model(Model):
             raise ValueError("query_pre_attn_scalar must be equal to n_embd / n_head")
 
     def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
-        del bid  # unusem
+        del bid  # unused
 
         # lm_head is not used in llama.cpp, while autoawq will include this tensor in model
         # To prevent errors, skip loading lm_head.weight.
@@ -3225,10 +3225,6 @@ def parse_args() -> argparse.Namespace:
         "--vocab-only", action="store_true",
         help="extract only the vocab",
     )
-    parser.add_argument(
-        "--awq-path", type=Path, default=None,
-        help="Path to scale awq cache file",
-    )
     parser.add_argument(
         "--outfile", type=Path,
         help="path to write to; default: based on input. {ftype} will be replaced by the outtype.",
@@ -3306,19 +3302,6 @@ def main() -> None:
 
     dir_model = args.model
 
-    if args.awq_path:
-        sys.path.insert(1, str(Path(__file__).parent / 'awq-py'))
-        from awq.apply_awq import add_scale_weights  # type: ignore[import-not-found]
-        tmp_model_path = args.model / "weighted_model"
-        dir_model = tmp_model_path
-        if tmp_model_path.is_dir():
-            logger.info(f"{tmp_model_path} exists as a weighted model.")
-        else:
-            tmp_model_path.mkdir(parents=True, exist_ok=True)
-            logger.info("Saving new weighted model ...")
-            add_scale_weights(str(args.model), str(args.awq_path), str(tmp_model_path))
-            logger.info(f"Saved weighted model at {tmp_model_path}.")
-
     if not dir_model.is_dir():
         logger.error(f'Error: {args.model} is not a directory')
         sys.exit(1)
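
Note: for anyone who still relies on AWQ-scaled weights, the pre-processing this patch removes from the converter can be reproduced as a standalone step before running convert_hf_to_gguf.py. A minimal sketch assembled from the deleted code above, assuming the legacy awq-py helpers are still available on disk; the model and cache paths are illustrative, not from this patch:

    # Apply AWQ scales to a HF model as a separate pre-processing step,
    # mirroring the logic removed from main(); requires the old awq-py package.
    import sys
    from pathlib import Path

    sys.path.insert(1, str(Path(__file__).parent / "awq-py"))
    from awq.apply_awq import add_scale_weights  # type: ignore[import-not-found]

    model_dir = Path("models/my-model")      # original HF model (illustrative)
    awq_cache = Path("awq-cache/model.pt")   # AWQ scale cache file (illustrative)
    out_dir = model_dir / "weighted_model"   # scaled model is written here

    if not out_dir.is_dir():
        out_dir.mkdir(parents=True, exist_ok=True)
        add_scale_weights(str(model_dir), str(awq_cache), str(out_dir))

The resulting directory can then be passed to convert_hf_to_gguf.py as the model path, with no --awq-path flag involved.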