From df8e7731d40034371ad547eb62357868572a5db6 Mon Sep 17 00:00:00 2001
From: Wing Lian
Date: Thu, 11 Apr 2024 01:01:58 -0400
Subject: [PATCH] update mistral/mixtral qlora+fsdp yamls

---
 examples/mistral/mistral-qlora-fsdp.yml | 82 +++++++++++++++++++++++++
 examples/mistral/mixtral-qlora-fsdp.yml |  8 +++
 2 files changed, 90 insertions(+)
 create mode 100644 examples/mistral/mistral-qlora-fsdp.yml

diff --git a/examples/mistral/mistral-qlora-fsdp.yml b/examples/mistral/mistral-qlora-fsdp.yml
new file mode 100644
index 0000000000..46ebaf47f1
--- /dev/null
+++ b/examples/mistral/mistral-qlora-fsdp.yml
@@ -0,0 +1,82 @@
+base_model: mistralai/Mistral-7B-v0.1
+model_type: AutoModelForCausalLM
+tokenizer_type: LlamaTokenizer
+trust_remote_code: true
+
+load_in_8bit: false
+load_in_4bit: true
+strict: false
+
+datasets:
+  - path: tatsu-lab/alpaca
+    type: alpaca
+dataset_prepared_path: last_run_prepared
+val_set_size: 0.02
+output_dir: ./qlora-out
+
+model_config:
+  output_router_logits: true
+
+adapter: qlora
+lora_model_dir:
+
+sequence_len: 1024
+sample_packing: false
+pad_to_sequence_len: false
+
+lora_r: 32
+lora_alpha: 16
+lora_dropout: 0.05
+lora_target_linear: true
+lora_fan_in_fan_out:
+
+wandb_project:
+wandb_entity:
+wandb_watch:
+wandb_name:
+wandb_log_model:
+
+gradient_accumulation_steps: 4
+micro_batch_size: 2
+num_epochs: 1
+optimizer: paged_adamw_8bit
+lr_scheduler: cosine
+learning_rate: 0.0002
+
+train_on_inputs: false
+group_by_length: false
+bf16: auto
+fp16:
+tf32: false
+
+gradient_checkpointing: true
+early_stopping_patience:
+resume_from_checkpoint:
+local_rank:
+logging_steps: 1
+xformers_attention:
+flash_attention: true
+
+loss_watchdog_threshold: 5.0
+loss_watchdog_patience: 3
+
+warmup_steps: 10
+evals_per_epoch: 4
+eval_table_size:
+eval_max_new_tokens: 128
+saves_per_epoch: 1
+debug:
+weight_decay: 0.0
+fsdp:
+  - full_shard
+  - auto_wrap
+fsdp_config:
+  fsdp_limit_all_gathers: true
+  fsdp_sync_module_states: true
+  fsdp_offload_params: false
+  fsdp_use_orig_params: false
+  fsdp_cpu_ram_efficient_loading: false
+  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
+  fsdp_state_dict_type: SHARDED_STATE_DICT
+  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
+special_tokens:
diff --git a/examples/mistral/mixtral-qlora-fsdp.yml b/examples/mistral/mixtral-qlora-fsdp.yml
index 32db7073b7..dd1ab77a10 100644
--- a/examples/mistral/mixtral-qlora-fsdp.yml
+++ b/examples/mistral/mixtral-qlora-fsdp.yml
@@ -69,6 +69,14 @@ debug:
 weight_decay: 0.0
 fsdp:
   - full_shard
+  - auto_wrap
 fsdp_config:
+  fsdp_limit_all_gathers: true
+  fsdp_sync_module_states: true
+  fsdp_offload_params: false
+  fsdp_use_orig_params: false
+  fsdp_cpu_ram_efficient_loading: true
+  fsdp_transformer_layer_cls_to_wrap: MixtralSparseMoeBlock
 fsdp_state_dict_type: SHARDED_STATE_DICT
+  fsdp_state_dict_type: SHARDED_STATE_DICT
+  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
 special_tokens:
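
With the patch applied, either config should be launchable through axolotl's standard entry point; the command below is a minimal sketch assuming a multi-GPU machine with accelerate and axolotl installed, and the YAML path would be swapped for the mixtral variant as needed:

    accelerate launch -m axolotl.cli.train examples/mistral/mistral-qlora-fsdp.yml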