fix(examples): remove is_*_derived as it's parsed automatically (#1297)
NanoCode012 authored Feb 21, 2024
1 parent e2786cc commit a7a9a14
Showing 26 changed files with 5 additions and 27 deletions.
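
In short: axolotl now derives the model family from the base model itself, so the is_*_derived_model flags are redundant in these examples. A minimal sketch of what an affected config header looks like after this change (the values are illustrative, assembled from the diffs below rather than copied from any single file):

base_model: NousResearch/Llama-2-7b-hf  # model family is now inferred from this automatically
model_type: LlamaForCausalLM
tokenizer_type: LlamaTokenizer
# is_llama_derived_model: true          # no longer needed; parsed automatically

load_in_8bit: true
load_in_4bit: false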
1 change: 0 additions & 1 deletion devtools/dev_sharegpt.yml
@@ -2,7 +2,6 @@
 base_model: TinyLlama/TinyLlama-1.1B-Chat-v1.0
 model_type: LlamaForCausalLM
 tokenizer_type: LlamaTokenizer
-is_llama_derived_model: true

 load_in_8bit: true
 load_in_4bit: false
1 change: 0 additions & 1 deletion examples/code-llama/13b/lora.yml
@@ -1,7 +1,6 @@
 base_model: codellama/CodeLlama-13b-hf
 model_type: LlamaForCausalLM
 tokenizer_type: CodeLlamaTokenizer
-is_llama_derived_model: true

 load_in_8bit: true
 load_in_4bit: false
1 change: 0 additions & 1 deletion examples/code-llama/13b/qlora.yml
@@ -1,7 +1,6 @@
 base_model: codellama/CodeLlama-13b-hf
 model_type: LlamaForCausalLM
 tokenizer_type: CodeLlamaTokenizer
-is_llama_derived_model: true

 load_in_8bit: false
 load_in_4bit: true
1 change: 0 additions & 1 deletion examples/code-llama/34b/lora.yml
@@ -1,7 +1,6 @@
 base_model: codellama/CodeLlama-34b-hf
 model_type: LlamaForCausalLM
 tokenizer_type: CodeLlamaTokenizer
-is_llama_derived_model: true

 load_in_8bit: true
 load_in_4bit: false
1 change: 0 additions & 1 deletion examples/code-llama/34b/qlora.yml
@@ -1,7 +1,6 @@
 base_model: codellama/CodeLlama-34b-hf
 model_type: LlamaForCausalLM
 tokenizer_type: CodeLlamaTokenizer
-is_llama_derived_model: true

 load_in_8bit: false
 load_in_4bit: true
1 change: 0 additions & 1 deletion examples/code-llama/7b/lora.yml
@@ -1,7 +1,6 @@
 base_model: codellama/CodeLlama-7b-hf
 model_type: LlamaForCausalLM
 tokenizer_type: CodeLlamaTokenizer
-is_llama_derived_model: true

 load_in_8bit: true
 load_in_4bit: false
1 change: 0 additions & 1 deletion examples/code-llama/7b/qlora.yml
@@ -1,7 +1,6 @@
 base_model: codellama/CodeLlama-7b-hf
 model_type: LlamaForCausalLM
 tokenizer_type: CodeLlamaTokenizer
-is_llama_derived_model: true

 load_in_8bit: false
 load_in_4bit: true
2 changes: 1 addition & 1 deletion examples/falcon/config-7b-lora.yml
@@ -2,7 +2,7 @@ base_model: tiiuae/falcon-7b
 trust_remote_code: true
 model_type: AutoModelForCausalLM
 tokenizer_type: AutoTokenizer
-is_falcon_derived_model: true
+
 load_in_8bit: true
 load_in_4bit: false
 gptq: false
2 changes: 1 addition & 1 deletion examples/falcon/config-7b-qlora.yml
@@ -5,7 +5,7 @@ base_model: tiiuae/falcon-7b
 trust_remote_code: true
 model_type: AutoModelForCausalLM
 tokenizer_type: AutoTokenizer
-is_falcon_derived_model: true
+
 load_in_8bit: false
 # enable 4bit for QLoRA
 load_in_4bit: true
2 changes: 1 addition & 1 deletion examples/falcon/config-7b.yml
@@ -2,7 +2,7 @@ base_model: tiiuae/falcon-7b
 trust_remote_code: true
 model_type: AutoModelForCausalLM
 tokenizer_type: AutoTokenizer
-is_falcon_derived_model: true
+
 load_in_8bit: false
 load_in_4bit: false
 gptq: false
1 change: 0 additions & 1 deletion examples/llama-2/fft_optimized.yml
@@ -1,7 +1,6 @@
 base_model: NousResearch/Llama-2-7b-hf
 model_type: LlamaForCausalLM
 tokenizer_type: LlamaTokenizer
-is_llama_derived_model: true

 load_in_8bit: false
 load_in_4bit: false
1 change: 0 additions & 1 deletion examples/llama-2/gptq-lora.yml
@@ -1,5 +1,4 @@
 base_model: TheBloke/Llama-2-7B-GPTQ
-is_llama_derived_model: false
 gptq: true
 gptq_disable_exllama: true
 model_type: AutoModelForCausalLM
1 change: 0 additions & 1 deletion examples/llama-2/loftq.yml
@@ -1,7 +1,6 @@
 base_model: NousResearch/Llama-2-7b-hf
 model_type: LlamaForCausalLM
 tokenizer_type: LlamaTokenizer
-is_llama_derived_model: true

 load_in_8bit: false
 load_in_4bit: false
1 change: 0 additions & 1 deletion examples/llama-2/lora.yml
@@ -1,7 +1,6 @@
 base_model: NousResearch/Llama-2-7b-hf
 model_type: LlamaForCausalLM
 tokenizer_type: LlamaTokenizer
-is_llama_derived_model: true

 load_in_8bit: true
 load_in_4bit: false
1 change: 0 additions & 1 deletion examples/llama-2/qlora.yml
@@ -1,7 +1,6 @@
 base_model: NousResearch/Llama-2-7b-hf
 model_type: LlamaForCausalLM
 tokenizer_type: LlamaTokenizer
-is_llama_derived_model: true

 load_in_8bit: false
 load_in_4bit: true
2 changes: 1 addition & 1 deletion examples/llama-2/relora.yml
@@ -1,7 +1,7 @@
 base_model: NousResearch/Llama-2-7b-hf
 model_type: LlamaForCausalLM
 tokenizer_type: LlamaTokenizer
-is_llama_derived_model: true
+

 load_in_8bit: false
 load_in_4bit: true
1 change: 0 additions & 1 deletion examples/mistral/Mistral-7b-example/config.yml
@@ -2,7 +2,6 @@
 base_model: mistralai/Mistral-7B-v0.1
 model_type: MistralForCausalLM
 tokenizer_type: LlamaTokenizer
-is_mistral_derived_model: true

 load_in_8bit: true
 load_in_4bit: false
1 change: 0 additions & 1 deletion examples/mistral/config.yml
@@ -1,7 +1,6 @@
 base_model: mistralai/Mistral-7B-v0.1
 model_type: MistralForCausalLM
 tokenizer_type: LlamaTokenizer
-is_mistral_derived_model: true

 load_in_8bit: false
 load_in_4bit: false
1 change: 0 additions & 1 deletion examples/mistral/qlora.yml
@@ -1,7 +1,6 @@
 base_model: mistralai/Mistral-7B-v0.1
 model_type: MistralForCausalLM
 tokenizer_type: LlamaTokenizer
-is_mistral_derived_model: true

 load_in_8bit: false
 load_in_4bit: true
1 change: 0 additions & 1 deletion examples/qwen/lora.yml
@@ -2,7 +2,6 @@ base_model: Qwen/Qwen-7B
 model_type: AutoModelForCausalLM
 tokenizer_type: AutoTokenizer

-is_qwen_derived_model: true
 trust_remote_code: true

 load_in_8bit: true
1 change: 0 additions & 1 deletion examples/qwen/qlora.yml
@@ -2,7 +2,6 @@ base_model: Qwen/Qwen-7B
 model_type: AutoModelForCausalLM
 tokenizer_type: AutoTokenizer

-is_qwen_derived_model: true
 trust_remote_code: true

 load_in_8bit: false
1 change: 0 additions & 1 deletion examples/tiny-llama/lora-mps.yml
@@ -1,7 +1,6 @@
 base_model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T
 model_type: LlamaForCausalLM
 tokenizer_type: LlamaTokenizer
-is_llama_derived_model: true

 load_in_8bit: true
 load_in_4bit: false
1 change: 0 additions & 1 deletion examples/tiny-llama/lora.yml
@@ -1,7 +1,6 @@
 base_model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T
 model_type: LlamaForCausalLM
 tokenizer_type: LlamaTokenizer
-is_llama_derived_model: true

 load_in_8bit: true
 load_in_4bit: false
1 change: 0 additions & 1 deletion examples/tiny-llama/pretrain.yml
@@ -2,7 +2,6 @@ base_model: TinyLlama/TinyLlama-1.1B-Chat-v1.0

 model_type: LlamaForCausalLM
 tokenizer_type: LlamaTokenizer
-is_llama_derived_model: true

 load_in_8bit: false
 load_in_4bit: false
1 change: 0 additions & 1 deletion examples/tiny-llama/qlora.yml
@@ -1,7 +1,6 @@
 base_model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T
 model_type: LlamaForCausalLM
 tokenizer_type: LlamaTokenizer
-is_llama_derived_model: true

 load_in_8bit: false
 load_in_4bit: true
3 changes: 1 addition & 2 deletions examples/yi-34B-chat/qlora.yml
@@ -1,8 +1,7 @@
 base_model: 01-ai/Yi-34B-Chat
 model_type: LlamaForCausalLM
 tokenizer_type: LlamaTokenizer
-is_mistral_derived_model: false
-is_llama_derived_model: true
+
 load_in_8bit: false
 load_in_4bit: true
 strict: false
