From 15f7910d33b41282f8c4fc1c4ca374b1b5965f2b Mon Sep 17 00:00:00 2001
From: Wing Lian
Date: Thu, 18 Apr 2024 14:28:03 -0400
Subject: [PATCH] llama-3 examples (#1537)

---
 examples/llama-3/README.md           | 13 +++++
 examples/llama-3/fft-8b.yaml         | 58 ++++++++++++++++++++
 examples/llama-3/lora-8b.yml         | 66 +++++++++++++++++++++++
 examples/llama-3/qlora-fsdp-70b.yaml | 80 ++++++++++++++++++++++++++++
 4 files changed, 217 insertions(+)
 create mode 100644 examples/llama-3/README.md
 create mode 100644 examples/llama-3/fft-8b.yaml
 create mode 100644 examples/llama-3/lora-8b.yml
 create mode 100644 examples/llama-3/qlora-fsdp-70b.yaml

diff --git a/examples/llama-3/README.md b/examples/llama-3/README.md
new file mode 100644
index 0000000000..5c01af27bb
--- /dev/null
+++ b/examples/llama-3/README.md
@@ -0,0 +1,13 @@
+# Llama-3
+
+https://llama.meta.com/llama3/
+
+[8B Base Model](https://huggingface.co/meta-llama/Meta-Llama-3-8B)
+ - [Full Fine Tune](./fft-8b.yaml)
+   - Single GPU @ 48GB VRAM
+ - [LoRA](./lora-8b.yml)
+   - Single GPU @ 11GB VRAM
+
+[70B Base Model](https://huggingface.co/meta-llama/Meta-Llama-3-70B)
+ - [QLORA+FSDP](./qlora-fsdp-70b.yaml)
+   - Dual GPU @ 21GB VRAM
diff --git a/examples/llama-3/fft-8b.yaml b/examples/llama-3/fft-8b.yaml
new file mode 100644
index 0000000000..8c9ba90bfe
--- /dev/null
+++ b/examples/llama-3/fft-8b.yaml
@@ -0,0 +1,58 @@
+base_model: meta-llama/Meta-Llama-3-8B
+model_type: LlamaForCausalLM
+tokenizer_type: AutoTokenizer
+
+load_in_8bit: false
+load_in_4bit: false
+strict: false
+
+datasets:
+  - path: tatsu-lab/alpaca
+    type: alpaca
+dataset_prepared_path: last_run_prepared
+val_set_size: 0.05
+output_dir: ./out
+
+sequence_len: 8192
+sample_packing: true
+pad_to_sequence_len: true
+
+wandb_project:
+wandb_entity:
+wandb_watch:
+wandb_name:
+wandb_log_model:
+
+gradient_accumulation_steps: 8
+micro_batch_size: 1
+num_epochs: 1
+optimizer: paged_adamw_8bit
+lr_scheduler: cosine
+learning_rate: 2e-5
+
+train_on_inputs: false
+group_by_length: false
+bf16: auto
+fp16:
+tf32: false
+
+gradient_checkpointing: true
+gradient_checkpointing_kwargs:
+  use_reentrant: false
+early_stopping_patience:
+resume_from_checkpoint:
+logging_steps: 1
+xformers_attention:
+flash_attention: true
+
+warmup_steps: 100
+evals_per_epoch: 2
+eval_table_size:
+saves_per_epoch: 1
+debug:
+deepspeed:
+weight_decay: 0.0
+fsdp:
+fsdp_config:
+special_tokens:
+  pad_token: <|end_of_text|>
diff --git a/examples/llama-3/lora-8b.yml b/examples/llama-3/lora-8b.yml
new file mode 100644
index 0000000000..a7793dce4c
--- /dev/null
+++ b/examples/llama-3/lora-8b.yml
@@ -0,0 +1,66 @@
+base_model: meta-llama/Meta-Llama-3-8B
+model_type: LlamaForCausalLM
+tokenizer_type: AutoTokenizer
+
+load_in_8bit: true
+load_in_4bit: false
+strict: false
+
+datasets:
+  - path: mhenrichsen/alpaca_2k_test
+    type: alpaca
+dataset_prepared_path:
+val_set_size: 0.05
+output_dir: ./lora-out
+
+sequence_len: 4096
+sample_packing: true
+pad_to_sequence_len: true
+
+adapter: lora
+lora_model_dir:
+lora_r: 32
+lora_alpha: 16
+lora_dropout: 0.05
+lora_target_linear: true
+lora_fan_in_fan_out:
+
+wandb_project:
+wandb_entity:
+wandb_watch:
+wandb_name:
+wandb_log_model:
+
+gradient_accumulation_steps: 4
+micro_batch_size: 2
+num_epochs: 4
+optimizer: adamw_bnb_8bit
+lr_scheduler: cosine
+learning_rate: 0.0002
+
+train_on_inputs: false
+group_by_length: false
+bf16: auto
+fp16:
+tf32: false
+
+gradient_checkpointing: true
+early_stopping_patience:
+resume_from_checkpoint:
+local_rank:
+logging_steps: 1
+xformers_attention:
+flash_attention: true
+s2_attention:
+
+warmup_steps: 10
+evals_per_epoch: 4
+eval_table_size:
+eval_max_new_tokens: 128
+saves_per_epoch: 1
+debug:
+deepspeed:
+weight_decay: 0.0
+fsdp:
+fsdp_config:
+special_tokens:
diff --git a/examples/llama-3/qlora-fsdp-70b.yaml b/examples/llama-3/qlora-fsdp-70b.yaml
new file mode 100644
index 0000000000..8d8785bfd5
--- /dev/null
+++ b/examples/llama-3/qlora-fsdp-70b.yaml
@@ -0,0 +1,80 @@
+base_model: casperhansen/llama-3-70b-fp16
+model_type: LlamaForCausalLM
+tokenizer_type: AutoTokenizer # PreTrainedTokenizerFast
+
+load_in_8bit: false
+load_in_4bit: true
+strict: false
+
+datasets:
+  - path: tatsu-lab/alpaca
+    type: alpaca
+dataset_prepared_path: last_run_prepared
+val_set_size: 0.05
+output_dir: ./out/qlora-llama3-70b
+
+adapter: qlora
+lora_model_dir:
+
+sequence_len: 512
+sample_packing: false
+pad_to_sequence_len: true
+
+lora_r: 8
+lora_alpha: 16
+lora_dropout: 0.05
+lora_target_modules:
+lora_target_linear: true
+lora_fan_in_fan_out:
+
+wandb_project:
+wandb_entity:
+wandb_watch:
+wandb_name:
+wandb_log_model:
+
+gradient_accumulation_steps: 4
+micro_batch_size: 1
+num_epochs: 4
+optimizer: adamw_torch
+lr_scheduler: cosine
+learning_rate: 0.00001
+
+train_on_inputs: false
+group_by_length: false
+bf16: auto
+fp16:
+tf32: false
+
+gradient_checkpointing: true
+gradient_checkpointing_kwargs:
+  use_reentrant: true
+early_stopping_patience:
+resume_from_checkpoint:
+local_rank:
+logging_steps: 1
+xformers_attention:
+flash_attention: true
+
+warmup_steps: 10
+evals_per_epoch: 4
+eval_table_size:
+saves_per_epoch: 1
+debug:
+deepspeed:
+weight_decay: 0.0
+fsdp:
+  - full_shard
+  - auto_wrap
+fsdp_config:
+  fsdp_limit_all_gathers: true
+  fsdp_sync_module_states: true
+  fsdp_offload_params: true
+  fsdp_use_orig_params: false
+  fsdp_cpu_ram_efficient_loading: true
+  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
+  fsdp_transformer_layer_cls_to_wrap: LlamaDecoderLayer
+  fsdp_state_dict_type: FULL_STATE_DICT
+  fsdp_sharding_strategy: FULL_STATE_DICT
+special_tokens:
+  pad_token: <|end_of_text|>
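
Usage sketch (not part of the patch above): these configs are launched with axolotl's CLI, e.g. `accelerate launch -m axolotl.cli.train examples/llama-3/lora-8b.yml`; the 70B QLoRA+FSDP config assumes two GPUs are visible. After a LoRA run the adapter lands in ./lora-out (the config's output_dir). Below is a minimal smoke test of that adapter with PEFT; bfloat16, device_map="auto", and the alpaca-style prompt text are assumptions, not values taken from the configs.

# Post-training smoke test: load the LoRA adapter written to ./lora-out by
# lora-8b.yml and run a short generation against the Llama-3 8B base model.
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

BASE_ID = "meta-llama/Meta-Llama-3-8B"  # base_model from the example configs
ADAPTER_DIR = "./lora-out"              # output_dir from lora-8b.yml

tokenizer = AutoTokenizer.from_pretrained(BASE_ID)
base = AutoModelForCausalLM.from_pretrained(
    BASE_ID, torch_dtype=torch.bfloat16, device_map="auto"  # assumed dtype/placement
)
model = PeftModel.from_pretrained(base, ADAPTER_DIR)

# Alpaca-style prompt, matching the `type: alpaca` datasets above (assumed template).
prompt = (
    "Below is an instruction that describes a task. "
    "Write a response that appropriately completes the request.\n\n"
    "### Instruction:\nName three primary colors.\n\n### Response:\n"
)
inputs = tokenizer(prompt, return_tensors="pt").to(base.device)
with torch.no_grad():
    out = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(out[0], skip_special_tokens=True))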