Mirror of https://github.com/ggerganov/llama.cpp.git (synced 2024-12-26 03:14:35 +00:00)
convert : add support of codeqwen due to tokenizer (#6707)
* add support of codeqwen due to tokenizer
* override load_hparams
* fix typo
* fix load_params
* convert : fix whitespace

---------

Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
This commit is contained in:
parent c8297c6af5
commit 3fec68be4e
@@ -363,6 +363,16 @@ class Model(ABC):
                         scores.append(-1000.0)
                         toktypes.append(SentencePieceTokenTypes.USER_DEFINED)
 
+        if vocab_size > len(tokens):
+            pad_count = vocab_size - len(tokens)
+            print(
+                f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]"
+            )
+            for i in range(1, pad_count + 1):
+                tokens.append(f"[PAD{i}]")
+                scores.append(-1000.0)
+                toktypes.append(SentencePieceTokenTypes.UNUSED)
+
         assert len(tokens) == vocab_size
 
         self.gguf_writer.add_tokenizer_model("llama")
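The added block pads the vocabulary when the model's declared vocab_size exceeds the number of pieces read from the tokenizer, so the tokens, scores and toktypes lists stay the same length before they are written to the GGUF file. A minimal standalone sketch of that behaviour (not part of the commit; NORMAL and UNUSED are assumed stand-ins for SentencePieceTokenTypes values):

    NORMAL, UNUSED = 1, 5          # assumed placeholder token-type values, for illustration only
    tokens   = ["<s>", "</s>", "hello"]
    scores   = [0.0, 0.0, -3.2]
    toktypes = [NORMAL, NORMAL, NORMAL]
    vocab_size = 5                 # declared size is larger than the pieces we have

    if vocab_size > len(tokens):
        pad_count = vocab_size - len(tokens)
        for i in range(1, pad_count + 1):
            tokens.append(f"[PAD{i}]")   # filler piece name
            scores.append(-1000.0)       # very low score so the pad is never preferred
            toktypes.append(UNUSED)

    assert len(tokens) == len(scores) == len(toktypes) == vocab_size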
@@ -1789,6 +1799,12 @@ class QwenModel(Model):
 class Qwen2Model(Model):
     model_arch = gguf.MODEL_ARCH.QWEN2
 
+    def set_vocab(self):
+        try:
+            self._set_vocab_sentencepiece()
+        except FileNotFoundError:
+            self._set_vocab_gpt2()
+
 
 @Model.register("Qwen2MoeForCausalLM")
 class Qwen2MoeModel(Model):
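The new set_vocab() override tries the SentencePiece vocabulary first and falls back to the GPT-2/BPE path, which is what lets CodeQwen checkpoints (which appear to ship a SentencePiece tokenizer.model) convert while other Qwen2 models keep using their BPE tokenizer files. A minimal standalone sketch of the same selection logic, assuming the FileNotFoundError comes from a missing tokenizer.model; pick_vocab and model_dir are hypothetical names, not part of the converter:

    from pathlib import Path

    def pick_vocab(model_dir: Path) -> str:
        # Prefer the SentencePiece vocab if the checkpoint ships one,
        # otherwise fall back to the GPT-2/BPE tokenizer files.
        if (model_dir / "tokenizer.model").is_file():
            return "sentencepiece"
        return "gpt2"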