Mirror of https://github.com/ggerganov/llama.cpp.git
convert-hf : make actual types match annotations
commit 95845d17ec
parent c29af7e225
convert-hf-to-gguf.py
@@ -1679,7 +1679,7 @@ class BertModel(Model):
             else:
                 raise NotImplementedError("Only MEAN and CLS pooling types supported")
 
-            self.gguf_writer.add_pooling_type(pooling_type.value)
+            self.gguf_writer.add_pooling_type(pooling_type)
 
     def set_vocab(self):
         path = self.dir_model
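The converter-side half of the change: GGUFWriter.add_pooling_type is annotated to take a PoolingType enum, so passing pooling_type.value (a plain int) contradicted that annotation. A minimal sketch of the mismatch a static checker such as mypy would flag, using a stand-in enum and signature rather than the real gguf definitions:

from enum import IntEnum

class PoolingType(IntEnum):  # stand-in for gguf.PoolingType
    MEAN = 1
    CLS = 2

def add_pooling_type(value: PoolingType) -> None:
    ...

pooling_type = PoolingType.MEAN
add_pooling_type(pooling_type.value)  # flagged: argument is int, annotation expects PoolingType
add_pooling_type(pooling_type)        # matches the annotation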
gguf-py/gguf/gguf_writer.py
@@ -362,7 +362,7 @@ class GGUFWriter:
         self.add_bool(Keys.Attention.CAUSAL.format(arch=self.arch), value)
 
     def add_pooling_type(self, value: PoolingType) -> None:
-        self.add_uint32(Keys.LLM.POOLING_TYPE.format(arch=self.arch), value)
+        self.add_uint32(Keys.LLM.POOLING_TYPE.format(arch=self.arch), value.value)
 
     def add_rope_dimension_count(self, count: int) -> None:
         self.add_uint32(Keys.Rope.DIMENSION_COUNT.format(arch=self.arch), count)
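On the writer side, the enum is now unwrapped to its raw integer only at the point where it is actually written as a uint32. A self-contained sketch of that pattern; the key name, enum members, and MiniWriter class are placeholders for illustration, not the real gguf-py API:

from enum import IntEnum

class PoolingType(IntEnum):  # placeholder mirroring the shape of gguf.PoolingType
    MEAN = 1
    CLS = 2

class MiniWriter:
    """Toy writer: collects key/value pairs instead of emitting a GGUF file."""

    def __init__(self) -> None:
        self.kv: dict[str, int] = {}

    def add_uint32(self, key: str, value: int) -> None:
        self.kv[key] = value

    def add_pooling_type(self, value: PoolingType) -> None:
        # unwrap the enum here, so call sites can stay typed against PoolingType
        self.add_uint32("llama.pooling_type", value.value)

writer = MiniWriter()
writer.add_pooling_type(PoolingType.MEAN)
print(writer.kv)  # {'llama.pooling_type': 1}

Keeping the int conversion inside the writer means every call site can pass the typed enum directly, which is what the commit title means by making the actual types match the annotations.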