From 6ef46f8dcac84825c39fcea57917260abd33e9ac Mon Sep 17 00:00:00 2001
From: Evan Griffiths <56087052+evangriffiths@users.noreply.github.com>
Date: Mon, 25 Dec 2023 18:29:55 +0000
Subject: [PATCH] Add an example config for finetuning a 34B model on a 24GB GPU (#1000)

* Add an example config for finetuning a 34B model on a 24GB GPU

* Remove wandb project
---
 examples/yi-34B-chat/README.md |  5 +++
 examples/yi-34B-chat/qlora.yml | 76 ++++++++++++++++++++++++++++++++++
 2 files changed, 81 insertions(+)
 create mode 100644 examples/yi-34B-chat/README.md
 create mode 100644 examples/yi-34B-chat/qlora.yml

diff --git a/examples/yi-34B-chat/README.md b/examples/yi-34B-chat/README.md
new file mode 100644
index 0000000000..07078850fb
--- /dev/null
+++ b/examples/yi-34B-chat/README.md
@@ -0,0 +1,5 @@
+# Overview
+
+This is an example configuration for finetuning Yi-34B-Chat. It demonstrates that a 34B model can be finetuned on a GPU with 24GB of VRAM.
+
+Tested on an RTX 4090 with `python -m axolotl.cli.train examples/yi-34B-chat/qlora.yml`: a single epoch of QLoRA finetuning on the alpaca dataset runs in 47 minutes, using 97% of available VRAM.
diff --git a/examples/yi-34B-chat/qlora.yml b/examples/yi-34B-chat/qlora.yml
new file mode 100644
index 0000000000..0c1a4b7889
--- /dev/null
+++ b/examples/yi-34B-chat/qlora.yml
@@ -0,0 +1,76 @@
+base_model: 01-ai/Yi-34B-Chat
+model_type: LlamaForCausalLM
+tokenizer_type: LlamaTokenizer
+is_mistral_derived_model: false
+is_llama_derived_model: true
+load_in_8bit: false
+load_in_4bit: true
+strict: false
+sequence_len: 1024
+bf16: true
+fp16: false
+tf32: false
+flash_attention: true
+special_tokens:
+  bos_token: "<|startoftext|>"
+  eos_token: "<|endoftext|>"
+  unk_token: ""
+
+# Data
+datasets:
+  - path: mhenrichsen/alpaca_2k_test
+    type: alpaca
+warmup_steps: 10
+
+# Iterations
+num_epochs: 1
+
+# Evaluation
+val_set_size: 0.1
+evals_per_epoch: 5
+eval_table_size:
+eval_table_max_new_tokens: 128
+eval_sample_packing: false
+eval_batch_size: 1
+
+# LoRA
+output_dir: ./qlora-out
+adapter: qlora
+lora_model_dir:
+lora_r: 32
+lora_alpha: 16
+lora_dropout: 0.05
+lora_target_linear: true
+lora_fan_in_fan_out:
+lora_target_modules:
+
+# Sampling
+sample_packing: false
+pad_to_sequence_len: false
+
+# Batching
+gradient_accumulation_steps: 4
+micro_batch_size: 1
+gradient_checkpointing: true
+
+# wandb
+wandb_project:
+
+# Optimizer
+optimizer: paged_adamw_8bit
+lr_scheduler: cosine
+learning_rate: 0.0002
+
+# Misc
+train_on_inputs: false
+group_by_length: false
+early_stopping_patience:
+resume_from_checkpoint:
+local_rank:
+logging_steps: 1
+xformers_attention:
+debug:
+deepspeed:
+weight_decay: 0
+fsdp:
+fsdp_config:
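
Below is a minimal usage sketch for this config, assuming axolotl and its dependencies are installed and that adapter weights land in `./qlora-out` (the `output_dir` set above). The inference entry point and its flag are assumptions and may differ between axolotl versions.

```bash
# Finetune Yi-34B-Chat with the QLoRA config added in this patch
python -m axolotl.cli.train examples/yi-34B-chat/qlora.yml

# Chat with the finetuned adapter (assumed entry point; adapter dir comes from output_dir)
python -m axolotl.cli.inference examples/yi-34B-chat/qlora.yml \
    --lora_model_dir="./qlora-out"
```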