#!/usr/bin/env bash
#
# freeze.sh — launch freeze-tuning of ChatGLM-6B via DeepSpeed.
# Edit TOT_CUDA / PORT below to match the machine before running.
# GPUs to expose to the job and the DeepSpeed rendezvous port.
TOT_CUDA="4,5,6"
PORT="11458"

# Freeze-tuning run of finetune_freeze.py on 3 GPUs.
# NOTE(review): --num_gpus=3 must match the device count in TOT_CUDA.
# The last option must NOT end in '\': a trailing backslash there splices
# the commented-out block below onto this command line.
CUDA_VISIBLE_DEVICES="${TOT_CUDA}" deepspeed --master_port="${PORT}" --num_gpus=3 finetune_freeze.py \
  --train_path /liuzyai04/thuir/lht/context_learning/data/instruction_data/all_instrution.json \
  --max_len 768 \
  --max_input_len 512 \
  --model_name_or_path /liuzyai04/thuir/lht/context_learning/chatGLM-6B \
  --tokenizer_name /liuzyai04/thuir/lht/context_learning/chatGLM-6B \
  --lora_rank 8 \
  --per_device_train_batch_size 16 \
  --gradient_accumulation_steps 4 \
  --num_train_epochs 10 \
  --save_steps 900 \
  --learning_rate 1e-5 \
  --fp16 \
  --remove_unused_columns false \
  --logging_steps 50 \
  --output_dir /liuzyai04/thuir/lht/context_learning/LORA/output_freeze \
  --deepspeed /liuzyai04/thuir/lht/context_learning/LORA/ds_config.json
# ---------------------------------------------------------------------------
# Alternative launchers kept for reference (disabled):
# ---------------------------------------------------------------------------
# python -m torch.distributed.launch \
# --nproc_per_node 1 \
# --master_port 29508 \
# finetune.py \
# --train_path /liuzyai04/thuir/lht/context_learning/data/instruction_data/all_instrution.json \
# --max_len 700 \
# --max_input_len 350 \
# --model_name_or_path /liuzyai04/thuir/lht/context_learning/chatGLM-6B \
# --tokenizer_name /liuzyai04/thuir/lht/context_learning/chatGLM-6B \
# --lora_rank 8 \
# --per_device_train_batch_size 2 \
# --gradient_accumulation_steps 1 \
# --max_steps 52000 \
# --save_steps 1000 \
# --learning_rate 5e-6 \
# --fp16 \
# --remove_unused_columns false \
# --logging_steps 50 \
# --output_dir /liuzyai04/thuir/lht/context_learning/ChatGLM-Tuning-master/output \
# CUDA_VISIBLE_DEVICES=5
# accelerate launch --main_process_port=29500 --num_processes=2 \
# --config_file /liuzyai04/thuir/lht/context_learning/ChatGLM-Tuning-master/acce_config/acce.yaml \
# finetune.py \
# --train_path /liuzyai04/thuir/lht/context_learning/data/instruction_data/all_instrution.json \
# --max_len 700 \
# --max_input_len 350 \
# --model_name_or_path /liuzyai04/thuir/lht/context_learning/chatGLM-6B \
# --tokenizer_name /liuzyai04/thuir/lht/context_learning/chatGLM-6B \
# --lora_rank 8 \
# --per_device_train_batch_size 2 \
# --gradient_accumulation_steps 1 \
# --max_steps 52000 \
# --save_steps 1000 \
# --learning_rate 5e-6 \
# --fp16 \
# --remove_unused_columns false \
# --logging_steps 50 \
# --output_dir /liuzyai04/thuir/lht/context_learning/ChatGLM-Tuning-master/output \
# --deepspeed /liuzyai04/thuir/lht/context_learning/ChatGLM-Tuning-master/ds_config.json \
# CUDA_VISIBLE_DEVICES=5
# python3 -m torch.distributed.launch \
# --nproc_per_node 1 \
# --master_port 29505 \
# train_gptj_summarize.py \
# --fp16 \