From 228420972ee0d0ab82e1221eb9bc5bf13afd5693 Mon Sep 17 00:00:00 2001
From: Wing Lian
Date: Thu, 14 Sep 2023 11:17:47 -0400
Subject: [PATCH] Phi examples (#569)

* add phi full ft example

* Add readme to point out that deepspeed should be used

* zero1 is better than zero2 for phi
---
 examples/phi/README.md  |  7 ++++
 examples/phi/phi-ft.yml | 75 +++++++++++++++++++++++++++++++++++++++++
 2 files changed, 82 insertions(+)
 create mode 100644 examples/phi/README.md
 create mode 100644 examples/phi/phi-ft.yml

diff --git a/examples/phi/README.md b/examples/phi/README.md
new file mode 100644
index 0000000000..6e12eec184
--- /dev/null
+++ b/examples/phi/README.md
@@ -0,0 +1,7 @@
+# Phi
+
+Due to some nuances with the phi code, please use deepspeed when training phi.
+
+```shell
+accelerate launch scripts/finetune.py examples/phi/phi-ft.yml --deepspeed deepspeed/zero1.json
+```
diff --git a/examples/phi/phi-ft.yml b/examples/phi/phi-ft.yml
new file mode 100644
index 0000000000..b5cde8139c
--- /dev/null
+++ b/examples/phi/phi-ft.yml
@@ -0,0 +1,75 @@
+base_model: microsoft/phi-1_5
+base_model_config: microsoft/phi-1_5
+model_type: AutoModelForCausalLM
+tokenizer_type: AutoTokenizer
+is_llama_derived_model: false
+trust_remote_code: true
+
+load_in_8bit: false
+load_in_4bit: false
+strict: false
+
+datasets:
+  - path: garage-bAInd/Open-Platypus
+    type: alpaca
+
+dataset_prepared_path: last_run_prepared
+val_set_size: 0.05
+output_dir: ./phi-sft-out
+
+sequence_len: 2048
+sample_packing: false # does not work with phi
+pad_to_sequence_len:
+
+adapter:
+lora_model_dir:
+lora_r:
+lora_alpha:
+lora_dropout:
+lora_target_linear:
+lora_fan_in_fan_out:
+
+wandb_project:
+wandb_entity:
+wandb_watch:
+wandb_run_id:
+wandb_log_model:
+
+gradient_accumulation_steps: 2
+micro_batch_size: 1
+num_epochs: 4
+optimizer: adamw_bnb_8bit
+adam_beta2: 0.95
+adam_epsilon: 0.00001
+max_grad_norm: 1.0
+lr_scheduler: cosine
+learning_rate: 0.000003
+
+train_on_inputs: false
+group_by_length: true
+bf16: true
+fp16: false
+tf32: true
+
+gradient_checkpointing:
+early_stopping_patience:
+resume_from_checkpoint:
+local_rank:
+logging_steps: 1
+xformers_attention:
+flash_attention:
+
+warmup_steps: 100
+eval_steps: 0.05
+save_steps:
+debug:
+deepspeed:
+weight_decay: 0.1
+fsdp:
+fsdp_config:
+resize_token_embeddings_to_32x: true
+special_tokens:
+  bos_token: "<|endoftext|>"
+  eos_token: "<|endoftext|>"
+  unk_token: "<|endoftext|>"
+  pad_token: "<|endoftext|>"
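
Note on the `--deepspeed deepspeed/zero1.json` argument in the README above: that JSON file is not part of this patch, so its contents are not shown here. As a rough sketch only, a minimal ZeRO stage-1 DeepSpeed config compatible with the Hugging Face `accelerate`/Trainer integration could look like the following; every key and value below is an assumption for illustration, not the repository's actual `deepspeed/zero1.json`.

```json
{
  "zero_optimization": {
    "stage": 1,
    "overlap_comm": true
  },
  "bf16": {
    "enabled": "auto"
  },
  "gradient_accumulation_steps": "auto",
  "gradient_clipping": "auto",
  "train_batch_size": "auto",
  "train_micro_batch_size_per_gpu": "auto",
  "wall_clock_breakdown": false
}
```

Stage 1 shards only the optimizer states across GPUs, which lines up with the commit note that zero1 behaves better than zero2 for phi, and the `"auto"` values let the Trainer integration fill in batch size, gradient accumulation, clipping, and precision from the settings in `phi-ft.yml`.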