convert : fix missing ftype for gemma (#5690)
commit 54fbcd2ce6 (parent 15499eb942)
@@ -1803,6 +1803,7 @@ class GemmaModel(Model):
         self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
         self.gguf_writer.add_key_length(hparams["head_dim"])
         self.gguf_writer.add_value_length(hparams["head_dim"])
+        self.gguf_writer.add_file_type(self.ftype)
 
     def write_tensors(self):
         block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer")))
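For context, the added call records the model's file type in the GGUF metadata (the general.file_type key), which downstream tools read to report the converted model's precision. Below is a minimal sketch, not part of the commit, assuming the gguf Python package that the convert scripts use; the output path and the ftype value are illustrative.

# Minimal sketch, assuming the gguf Python package; path and ftype are illustrative.
import gguf

writer = gguf.GGUFWriter("gemma-example.gguf", arch="gemma")
writer.add_file_type(1)        # 1 == F16 in the converter's ftype convention (assumption for this example)
writer.write_header_to_file()  # GGUF magic, version, tensor/kv counts
writer.write_kv_data_to_file() # metadata key-values, including general.file_type
writer.close()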