Commit bd7ca2f

Remove unused linear probe code

egorkrash committed Feb 20, 2024
1 parent e176ee7
Showing 2 changed files with 1 addition and 12 deletions.
src/train_lm.py (0 additions, 8 deletions)
@@ -399,14 +399,6 @@ def compute_objective(metrics: Dict[str, float]) -> float:
                    'F1 {k}': eval_callback.f1_score[k],}
         trainer.log_metrics(f"eval_{k}", metrics)
         trainer.save_metrics(f"eval_{k}", metrics)
 
-    # !!! THIS BLOCK WITH LINEAR PROBES IS NOT USED FOR THE EXPERIMENTS IN THE PAPER !!!
-    if training_args.do_lin_probe:
-        from src.lm_training_utils import linear_probe
-        logger.info('Starting linear probe')
-        eval_dataset_d1 = eval_dataset_tokenized['train_questions_qd1consis']
-        eval_dataset_d2 = eval_dataset_tokenized['train_questions_qd2incons']
-        linear_probe(model, eval_dataset_d1, eval_dataset_d2, device=training_args.device)
-
     wandb.finish()
 
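For context on what the removed block invoked: `linear_probe` comes from `src.lm_training_utils`, whose implementation is not shown in this diff. Below is a minimal sketch of a linear probe in this setting, assuming it fits a logistic-regression classifier on frozen last-layer hidden states to test whether the model linearly separates the two question sets. The function body, the mean-pooling choice, and the sklearn dependency are all assumptions, not the repository's actual code.

    import torch
    from sklearn.linear_model import LogisticRegression
    from sklearn.model_selection import train_test_split

    @torch.no_grad()
    def linear_probe(model, dataset_d1, dataset_d2, device="cuda"):
        # Hypothetical reconstruction: probe frozen hidden states for a
        # linear separation between the two evaluation datasets.
        model.eval()

        def hidden_states(dataset):
            feats = []
            for example in dataset:
                input_ids = torch.tensor(example["input_ids"], device=device).unsqueeze(0)
                out = model(input_ids, output_hidden_states=True)
                # Mean-pool the final layer over the sequence dimension.
                feats.append(out.hidden_states[-1].mean(dim=1).squeeze(0).cpu())
            return torch.stack(feats)

        x = torch.cat([hidden_states(dataset_d1), hidden_states(dataset_d2)]).numpy()
        y = [0] * len(dataset_d1) + [1] * len(dataset_d2)
        x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)
        clf = LogisticRegression(max_iter=1000).fit(x_train, y_train)
        print(f"probe accuracy: {clf.score(x_test, y_test):.3f}")

Near-chance probe accuracy would suggest the two question sets are not linearly separable in the model's representations; the commit message and the removed comment indicate this diagnostic was not used for the paper's experiments.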
utils/arguments.py (1 addition, 4 deletions)
@@ -1,6 +1,6 @@
 from dataclasses import dataclass, field
 from typing import Optional
-from transformers import MODEL_FOR_CAUSAL_LM_MAPPING, TrainingArguments, Seq2SeqTrainingArguments
+from transformers import MODEL_FOR_CAUSAL_LM_MAPPING, Seq2SeqTrainingArguments
 import yaml
 from copy import deepcopy
 from utils.logger import setup_logger
@@ -100,9 +100,6 @@ class ModelTrainingArguments(Seq2SeqTrainingArguments):
     do_sweeps: Optional[bool] = field(
         default=False, metadata={"help": "Whether to do hyperparameters search."}
     )
-    do_lin_probe: Optional[bool] = field(
-        default=False, metadata={"help": "Whether to do linear probe."}
-    )
     n_sweeps: Optional[int] = field(
         default=5, metadata={"help": "Number of hyperparameter sweeps to do."}
     )
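Before this commit, the `do_lin_probe` field followed the same dataclass-field pattern as its neighbors, so it would have been toggled however the other training arguments are parsed. A hedged example, assuming the standard `transformers.HfArgumentParser` flow (the actual parsing entry point is not shown in this diff, and the file's `yaml` import suggests a config-file path may be used instead):

    from transformers import HfArgumentParser
    from utils.arguments import ModelTrainingArguments

    # Parse CLI-style arguments into the dataclass; output_dir is required
    # by the Seq2SeqTrainingArguments base class.
    parser = HfArgumentParser(ModelTrainingArguments)
    (training_args,) = parser.parse_args_into_dataclasses(
        ["--output_dir", "runs/demo", "--do_lin_probe", "True"]
    )
    assert training_args.do_lin_probe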
