@@ -2269,7 +2269,7 @@ class Qwen2Model(Model):
                 self.gguf_writer.add_rope_scaling_orig_ctx_len(self.hparams["rope_scaling"]["original_max_position_embeddings"])


-@Model.register("Qwen2VLForConditionalGeneration")
+@Model.register("Qwen2VLForConditionalGeneration", "Qwen2_5_VLForConditionalGeneration")
 class Qwen2VLModel(Model):
     model_arch = gguf.MODEL_ARCH.QWEN2VL

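For readers unfamiliar with the converter's registry: `Model.register` maps the `architectures` string from a checkpoint's `config.json` to the converter class that handles it, so adding the second name lets Qwen2.5-VL checkpoints reuse the existing `Qwen2VLModel` converter unchanged. Below is a minimal, self-contained sketch of that registry-decorator pattern (simplified and illustrative, not the exact upstream implementation):

```python
# Sketch of the registry-decorator pattern behind Model.register
# (simplified; not the exact upstream convert_hf_to_gguf.py code).

class Model:
    _model_classes: dict[str, type["Model"]] = {}

    @classmethod
    def register(cls, *names: str):
        # Each name is an `architectures` entry from a HF config.json;
        # all of them resolve to the same converter class.
        def wrapper(model_cls: type["Model"]) -> type["Model"]:
            for name in names:
                cls._model_classes[name] = model_cls
            return model_cls
        return wrapper

    @classmethod
    def from_model_architecture(cls, arch: str) -> type["Model"]:
        try:
            return cls._model_classes[arch]
        except KeyError:
            raise NotImplementedError(f"Architecture {arch!r} not supported") from None


@Model.register("Qwen2VLForConditionalGeneration", "Qwen2_5_VLForConditionalGeneration")
class Qwen2VLModel(Model):
    pass


# Both architecture names now resolve to the same converter class.
assert Model.from_model_architecture("Qwen2_5_VLForConditionalGeneration") is Qwen2VLModel
```

Registering several architecture names on one class avoids duplicating converter logic when two model families share the same tensor layout.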