diff --git a/pyproject.toml b/pyproject.toml
index b8bdfd6..145650c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -12,7 +12,7 @@ build-backend = "poetry.masonry.api"
 
 [tool.poetry]
 name = "together"
-version = "1.5.15"
+version = "1.5.16"
 authors = ["Together AI "]
 description = "Python client for Together's Cloud Platform!"
 readme = "README.md"
diff --git a/src/together/resources/finetune.py b/src/together/resources/finetune.py
index 27baf2d..67c74c4 100644
--- a/src/together/resources/finetune.py
+++ b/src/together/resources/finetune.py
@@ -183,24 +183,6 @@ def create_finetune_request(
         )
         train_on_inputs = "auto"
 
-    if dpo_beta is not None and training_method != "dpo":
-        raise ValueError("dpo_beta is only supported for DPO training")
-    if dpo_normalize_logratios_by_length and training_method != "dpo":
-        raise ValueError(
-            "dpo_normalize_logratios_by_length=True is only supported for DPO training"
-        )
-    if rpo_alpha is not None:
-        if training_method != "dpo":
-            raise ValueError("rpo_alpha is only supported for DPO training")
-        if not rpo_alpha >= 0.0:
-            raise ValueError(f"rpo_alpha should be non-negative (got {rpo_alpha})")
-
-    if simpo_gamma is not None:
-        if training_method != "dpo":
-            raise ValueError("simpo_gamma is only supported for DPO training")
-        if not simpo_gamma >= 0.0:
-            raise ValueError(f"simpo_gamma should be non-negative (got {simpo_gamma})")
-
     lr_scheduler: FinetuneLRScheduler
     if lr_scheduler_type == "cosine":
         if scheduler_num_cycles <= 0.0:
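
Note: with these checks deleted, create_finetune_request no longer rejects the DPO-only
arguments (dpo_beta, dpo_normalize_logratios_by_length, rpo_alpha, simpo_gamma) client-side
when training_method is not "dpo"; presumably any such validation now happens server-side.
For reference, a minimal standalone sketch reproducing the removed checks (the helper name
and signature are illustrative, not part of the SDK):

    from typing import Optional

    def validate_dpo_args(
        training_method: str,
        dpo_beta: Optional[float] = None,
        dpo_normalize_logratios_by_length: bool = False,
        rpo_alpha: Optional[float] = None,
        simpo_gamma: Optional[float] = None,
    ) -> None:
        # dpo_beta is meaningful only for DPO training.
        if dpo_beta is not None and training_method != "dpo":
            raise ValueError("dpo_beta is only supported for DPO training")
        # Length normalization of log-ratios is likewise DPO-specific.
        if dpo_normalize_logratios_by_length and training_method != "dpo":
            raise ValueError(
                "dpo_normalize_logratios_by_length=True is only supported for DPO training"
            )
        # rpo_alpha: DPO-only and must be non-negative.
        if rpo_alpha is not None:
            if training_method != "dpo":
                raise ValueError("rpo_alpha is only supported for DPO training")
            if not rpo_alpha >= 0.0:
                raise ValueError(f"rpo_alpha should be non-negative (got {rpo_alpha})")
        # simpo_gamma: DPO-only and must be non-negative.
        if simpo_gamma is not None:
            if training_method != "dpo":
                raise ValueError("simpo_gamma is only supported for DPO training")
            if not simpo_gamma >= 0.0:
                raise ValueError(f"simpo_gamma should be non-negative (got {simpo_gamma})")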