diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py
index 875a8c550..ecc53d803 100755
--- a/convert-hf-to-gguf.py
+++ b/convert-hf-to-gguf.py
@@ -1309,8 +1309,10 @@ class LlamaModel(Model):
         # Apply to CodeLlama only (and ignore for Llama 3 with a vocab size of 128256)
         if self.hparams.get("vocab_size", 32000) == 32016:
-            special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=False,
-                                              special_token_types = ['prefix', 'suffix', 'middle', 'eot'])
+            special_vocab = gguf.SpecialVocab(
+                self.dir_model, load_merges=False,
+                special_token_types = ['prefix', 'suffix', 'middle', 'eot']
+            )
             special_vocab._set_special_token("prefix", 32007)
             special_vocab._set_special_token("suffix", 32008)
             special_vocab._set_special_token("middle", 32009)