@@ -1838,7 +1838,7 @@ class MmprojModel(ModelBase):
     def tensor_force_quant(self, name, new_name, bid, n_dims):
         del bid, name, n_dims  # unused
-        if ".patch_embd.weight" in new_name:
+        if ".patch_embd.weight" in new_name or ".patch_merger.weight" in new_name:
             return gguf.GGMLQuantizationType.F16 if self.ftype == gguf.LlamaFileType.MOSTLY_F16 else gguf.GGMLQuantizationType.F32
         return False
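For context, here is a minimal standalone sketch of the widened check, not part of the patch itself; it assumes the gguf-py package is installed, and the tensor names used below are purely illustrative:

```python
import gguf

def forced_type(new_name: str, ftype: gguf.LlamaFileType):
    # Mirrors the updated tensor_force_quant() condition from the hunk above:
    # patch embedding and patch merger weights are forced to F16/F32 instead
    # of following the regular quantization path.
    if ".patch_embd.weight" in new_name or ".patch_merger.weight" in new_name:
        return gguf.GGMLQuantizationType.F16 if ftype == gguf.LlamaFileType.MOSTLY_F16 else gguf.GGMLQuantizationType.F32
    return False

# Hypothetical tensor names, for illustration only:
print(forced_type("mm.patch_merger.weight", gguf.LlamaFileType.MOSTLY_F16))  # F16 forced
print(forced_type("v.blk.0.attn_q.weight", gguf.LlamaFileType.MOSTLY_F16))   # False: no forced type
```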