From 067d8382db7aa814db3b543d34848a8ac0f6f503 Mon Sep 17 00:00:00 2001 From: Zach Mueller Date: Thu, 18 Apr 2024 13:02:38 -0400 Subject: [PATCH] Eval strat --- docs/source/transformers_integrations.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/source/transformers_integrations.mdx b/docs/source/transformers_integrations.mdx index 7993b2f28..d385e2683 100644 --- a/docs/source/transformers_integrations.mdx +++ b/docs/source/transformers_integrations.mdx @@ -8,7 +8,7 @@ pip install datasets transformers torch evaluate nltk rouge_score ## Trainer -The metrics in `evaluate` can be easily integrated with the [`~transformers.Trainer`]. The `Trainer` accepts a `compute_metrics` keyword argument that passes a function to compute metrics. One can specify the evaluation interval with `evaluation_strategy` in the [`~transformers.TrainerArguments`], and based on that, the model is evaluated accordingly, and the predictions and labels passed to `compute_metrics`. +The metrics in `evaluate` can be easily integrated with the [`~transformers.Trainer`]. The `Trainer` accepts a `compute_metrics` keyword argument that passes a function to compute metrics. One can specify the evaluation interval with `eval_strategy` in the [`~transformers.TrainingArguments`], and based on that, the model is evaluated accordingly, and the predictions and labels passed to `compute_metrics`. 
```python from datasets import load_dataset @@ -38,7 +38,7 @@ def compute_metrics(eval_pred): # Load pretrained model and evaluate model after each epoch model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", num_labels=5) -training_args = TrainingArguments(output_dir="test_trainer", evaluation_strategy="epoch") +training_args = TrainingArguments(output_dir="test_trainer", eval_strategy="epoch") trainer = Trainer( model=model, @@ -105,7 +105,7 @@ data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=model) training_args = Seq2SeqTrainingArguments( output_dir="./results", - evaluation_strategy="epoch", + eval_strategy="epoch", learning_rate=2e-5, per_device_train_batch_size=16, per_device_eval_batch_size=4,