
Commit ec22e4b

Author: pockers21

refactor: make Eagle2DraftModel inherit from Qwen2Model instead of TextModel
1 parent 4b4975c commit ec22e4b

File tree

1 file changed (+1, -15 lines)


convert_hf_to_gguf.py

Lines changed: 1 addition & 15 deletions
@@ -2713,23 +2713,9 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter
 
 
 @ModelBase.register("Eagle2DraftForCausalLM")
-class Eagle2DraftModel(TextModel):
+class Eagle2DraftModel(Qwen2Model):
     model_arch = gguf.MODEL_ARCH.EAGLE2_DRAFT
 
-    def set_vocab(self):
-        try:
-            self._set_vocab_sentencepiece()
-        except FileNotFoundError:
-            self._set_vocab_gpt2()
-
-    def set_gguf_parameters(self):
-        super().set_gguf_parameters()
-        if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]:
-            if self.hparams["rope_scaling"].get("type") == "yarn":
-                self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
-                self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"])
-                self.gguf_writer.add_rope_scaling_orig_ctx_len(self.hparams["rope_scaling"]["original_max_position_embeddings"])
-
 
 @ModelBase.register(
     "Qwen2VLModel",
