Skip to content

Commit fdbed5d

Browse files
Authored commit fdbed5d — Merge branch 'master' into wyen/fix_pg_update (2 parents: 25715d2 + f8f7363).

File tree

4 files changed: +17 lines added, -20 lines removed.

examples/pytorch/language-modeling/train_api_hf_dataset.ipynb

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -21,8 +21,8 @@
2121
"from kubeflow.storage_initializer.s3 import S3DatasetParams\n",
2222
"from kubeflow.storage_initializer.hugging_face import (\n",
2323
" HuggingFaceModelParams,\n",
24-
" HuggingFaceTrainParams,\n",
25-
" HfDatasetParams,\n",
24+
" HuggingFaceTrainerParams,\n",
25+
" HuggingFaceDatasetParams,\n",
2626
")\n",
2727
"from kubeflow.storage_initializer.constants import INIT_CONTAINER_MOUNT_PATH\n",
2828
"from peft import LoraConfig\n",
@@ -70,8 +70,8 @@
7070
" ),\n",
7171
" # it is assumed for text related tasks, you have 'text' column in the dataset.\n",
7272
" # for more info on how dataset is loaded check load_and_preprocess_data function in sdk/python/kubeflow/trainer/hf_llm_training.py\n",
73-
" dataset_provider_parameters=HfDatasetParams(repo_id=\"imdatta0/ultrachat_1k\"),\n",
74-
" train_parameters=HuggingFaceTrainParams(\n",
73+
" dataset_provider_parameters=HuggingFaceDatasetParams(repo_id=\"imdatta0/ultrachat_1k\"),\n",
74+
" trainer_parameters=HuggingFaceTrainerParams(\n",
7575
" lora_config=LoraConfig(\n",
7676
" r=8,\n",
7777
" lora_alpha=8,\n",

examples/pytorch/language-modeling/train_api_s3_dataset.ipynb

Lines changed: 9 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -20,9 +20,9 @@
2020
"from kubeflow.training.api.training_client import TrainingClient\n",
2121
"from kubeflow.storage_initializer.hugging_face import (\n",
2222
" HuggingFaceModelParams,\n",
23-
" HuggingFaceTrainParams,\n",
24-
" HfDatasetParams,\n",
23+
" HuggingFaceTrainerParams,\n",
2524
")\n",
25+
"from kubeflow.storage_initializer.s3 import S3DatasetParams\n",
2626
"from kubeflow.storage_initializer.constants import INIT_CONTAINER_MOUNT_PATH\n",
2727
"from peft import LoraConfig\n",
2828
"import transformers\n",
@@ -81,16 +81,14 @@
8181
" # it is assumed for text related tasks, you have 'text' column in the dataset.\n",
8282
" # for more info on how dataset is loaded check load_and_preprocess_data function in sdk/python/kubeflow/trainer/hf_llm_training.py\n",
8383
" dataset_provider_parameters=S3DatasetParams(\n",
84-
" {\n",
85-
" \"endpoint_url\": \"http://10.117.63.3\",\n",
86-
" \"bucket_name\": \"test\",\n",
87-
" \"file_key\": \"imdatta0___ultrachat_1k\",\n",
88-
" \"region_name\": \"us-east-1\",\n",
89-
" \"access_key\": s3_access_key,\n",
90-
" \"secret_key\": s3_secret_key,\n",
91-
" }\n",
84+
" endpoint_url=\"http://10.117.63.3\",\n",
85+
" bucket_name=\"test\",\n",
86+
" file_key=\"imdatta0___ultrachat_1k\",\n",
87+
" region_name=\"us-east-1\",\n",
88+
" access_key=s3_access_key,\n",
89+
" secret_key=s3_secret_key,\n",
9290
" ),\n",
93-
" train_parameters=HuggingFaceTrainParams(\n",
91+
" trainer_parameters=HuggingFaceTrainerParams(\n",
9492
" lora_config=LoraConfig(\n",
9593
" r=8,\n",
9694
" lora_alpha=8,\n",

examples/pytorch/text-classification/Fine-Tune-BERT-LLM.ipynb

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -613,8 +613,8 @@
613613
"from kubeflow.training import TrainingClient\n",
614614
"from kubeflow.storage_initializer.hugging_face import (\n",
615615
" HuggingFaceModelParams,\n",
616-
" HuggingFaceTrainParams,\n",
617-
" HfDatasetParams,\n",
616+
" HuggingFaceTrainerParams,\n",
617+
" HuggingFaceDatasetParams,\n",
618618
")\n",
619619
"\n",
620620
"import transformers\n",
@@ -646,12 +646,12 @@
646646
" \"access_modes\": [\"ReadWriteOnce\"] # Since we use 1 Worker, PVC access mode is ReadWriteOnce.\n",
647647
" },\n",
648648
" # Use 3000 samples from Yelp dataset.\n",
649-
" dataset_provider_parameters=HfDatasetParams(\n",
649+
" dataset_provider_parameters=HuggingFaceDatasetParams(\n",
650650
" repo_id=\"yelp_review_full\",\n",
651651
" split=\"train[:3000]\",\n",
652652
" ),\n",
653653
" # Specify HuggingFace Trainer parameters. In this example, we will skip evaluation and model checkpoints.\n",
654-
" train_parameters=HuggingFaceTrainParams(\n",
654+
" trainer_parameters=HuggingFaceTrainerParams(\n",
655655
" training_parameters=transformers.TrainingArguments(\n",
656656
" output_dir=\"test_trainer\",\n",
657657
" save_strategy=\"no\",\n",

pkg/controller.v1/common/scheduling.go

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,6 @@ package common
1818

1919
import (
2020
"fmt"
21-
2221
log "github.com/sirupsen/logrus"
2322
k8serrors "k8s.io/apimachinery/pkg/api/errors"
2423
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

Comments: 0 commit comments.