Skip to content

Commit

Permalink
chore(callback): Remove old peft saving code (#510)
Browse files Browse the repository at this point in the history
  • Loading branch information
NanoCode012 authored Sep 22, 2023
1 parent 03e5907 commit d5f8589
Show file tree
Hide file tree
Showing 2 changed files with 0 additions and 30 deletions.
23 changes: 0 additions & 23 deletions src/axolotl/utils/callbacks.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,29 +43,6 @@
IGNORE_INDEX = -100


class SavePeftModelCallback(TrainerCallback):  # pylint: disable=too-few-public-methods
    """Trainer callback that persists the PEFT adapter each time a checkpoint is saved."""

    def on_save(
        self,
        args: TrainingArguments,
        state: TrainerState,
        control: TrainerControl,
        **kwargs,
    ):
        """Write the adapter weights under the current checkpoint directory.

        Returns the ``control`` object unchanged, as the Trainer expects.
        """
        # Checkpoint dirs follow the HF Trainer convention: <output_dir>/<prefix>-<step>
        ckpt_dir = os.path.join(
            args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{state.global_step}"
        )
        adapter_dir = os.path.join(ckpt_dir, "adapter_model")
        # Only the adapter weights are saved here, not the full base model;
        # safetensors usage mirrors the Trainer's own save setting.
        kwargs["model"].save_pretrained(
            adapter_dir, save_safetensors=args.save_safetensors
        )
        return control


class EvalFirstStepCallback(
TrainerCallback
): # pylint: disable=too-few-public-methods disable=unused-argument
Expand Down
7 changes: 0 additions & 7 deletions src/axolotl/utils/trainer.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,6 @@
EvalFirstStepCallback,
GPUStatsCallback,
SaveBetterTransformerModelCallback,
SavePeftModelCallback,
bench_eval_callback_factory,
log_prediction_callback_factory,
)
Expand Down Expand Up @@ -711,12 +710,6 @@ def setup_trainer(cfg, train_dataset, eval_dataset, model, tokenizer, total_num_
if cfg.relora_steps:
callbacks.append(ReLoRACallback(cfg))

if cfg.local_rank == 0 and cfg.adapter in [
"lora",
"qlora",
]: # only save in rank 0
callbacks.append(SavePeftModelCallback)

if hasattr(model, "use_bettertransformer") and model.use_bettertransformer is True:
callbacks.append(SaveBetterTransformerModelCallback)

Expand Down

0 comments on commit d5f8589

Please sign in to comment.