Remove .attention from skipped tensors to match more accurately (#7051)

Author: Bartowski, 2024-05-02 19:49:09 -04:00 (committed by GitHub)
parent 6ecf3189e0
commit 60325fa56f

@@ -1427,7 +1427,7 @@ class LlamaModel(Model):
         experts = dict()
         for name, data_torch in self.get_tensors():
             # we don't need these
-            if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq")):
+            if name.endswith((".attention.masked_bias", ".attention.bias", ".rotary_emb.inv_freq")):
                 continue

             old_dtype = data_torch.dtype
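
For context, a minimal sketch (not part of the commit; the tensor name below is hypothetical) of why dropping the ".attention" prefix matches more tensors: str.endswith with a tuple succeeds if any suffix matches, so the shorter ".rotary_emb.inv_freq" suffix also catches names that the longer ".attention.rotary_emb.inv_freq" suffix missed.

# Sketch only; "model.layers.0.self_attn.rotary_emb.inv_freq" is a hypothetical tensor name.
skipped_before = (".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq")
skipped_after = (".attention.masked_bias", ".attention.bias", ".rotary_emb.inv_freq")

name = "model.layers.0.self_attn.rotary_emb.inv_freq"
print(name.endswith(skipped_before))  # False: this tensor slipped through the old check
print(name.endswith(skipped_after))   # True: the broader suffix now skips it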