Mirror of https://github.com/ggerganov/llama.cpp.git, synced 2024-12-25 10:54:36 +00:00.
gguf-py : fix dtype check (#6045)
This commit is contained in:
parent
15a333260a
commit
77178eedc8
@@ -204,7 +204,7 @@ class GGUFWriter:
         for i in range(n_dims):
             self.ti_data += self._pack("Q", tensor_shape[n_dims - 1 - i])
         if raw_dtype is None:
-            if tensor_shape == np.float32:
+            if tensor_dtype == np.float32:
                 dtype = GGMLQuantizationType.F32
             elif tensor_dtype == np.float16:
                 dtype = GGMLQuantizationType.F16
Loading…
Reference in New Issue
Block a user