#!/bin/bash
#set -x
#
source ~/.bashrc
# Previous env
#export NCCL_DEBUG=INFO
#export NCCL_SOCKET_IFNAME=eth1
#export NCCL_IB_GID_INDEX=3
#export NCCL_IB_SL=3
#export NCCL_NET_GDR_READ=1
# RDMA env
export NCCL_SOCKET_IFNAME=eth1
export NCCL_IB_GID_INDEX=3
export NCCL_IB_HCA=mlx5_2:1  # InfiniBand HCA and port to use for RDMA traffic
export NCCL_IB_SL=3
export NCCL_CHECKS_DISABLE=1
export NCCL_P2P_DISABLE=0
export NCCL_LL_THRESHOLD=16384
export NCCL_IB_CUDA_SUPPORT=1
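# The RDMA settings above are site-specific: NCCL_SOCKET_IFNAME should name a
# real interface on your nodes (see `ip link`) and NCCL_IB_HCA a real HCA
# (see `ibstat`); adjust both to match your own fabric.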
export MASTER_ADDR="${CHIEF_IP:=localhost}"  # ${VAR:=default} assigns the fallback when VAR is unset
export MASTER_PORT="${MASTER_PORT:=29501}"   # chosen so it does not clash with the port used by remote submission
#export RANK=$OMPI_COMM_WORLD_RANK
#export LOCAL_RANK=$OMPI_COMM_WORLD_LOCAL_RANK
#export WORLD_SIZE=$OMPI_COMM_WORLD_SIZE
Proj_path=[YOUR_WORKSPACE]
Code_path=${Proj_path}/mytrain/llms_scripts
Model_path=${Proj_path}/FinetuneModels/Qwen2-0.5B-sft
#
Data_path=${Proj_path}/Dataset/dpo_chat
Train_path=${Data_path}/dpo_train.json
Valid_path=${Data_path}/dpo_eval.json
#
Output_path=${Proj_path}/FinetuneModels/Qwen2-0.5B-sft-dpo
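# Optional sanity check (a defensive addition; assumes the paths above exist on
# this filesystem): abort early if the model or DPO data cannot be found.
for p in "${Model_path}" "${Train_path}" "${Valid_path}"; do
    [ -e "${p}" ] || { echo "Missing path: ${p}" >&2; exit 1; }
done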
# effective global batch size: 16 = 4 processes x 1 per-device batch x 4 grad-accum steps
accelerate launch --num_processes 4 \
--mixed_precision 'bf16' \
${Code_path}/llms_dpo.py \
--model_name_or_path ${Model_path} \
--train_file ${Train_path} \
--eval_file ${Valid_path} \
--output_dir ${Output_path} \
--per_device_train_batch_size 1 \
--max_prompt_length 512 \
--max_length 1024 \
--learning_rate 2e-6 \
--gradient_accumulation_steps 4 \
--logging_steps 10 \
--eval_steps 50 \
--max_steps 2000 \
--save_strategy "steps" \
--save_steps 200 \
--save_total_limit 1 \
--warmup_steps 10 \
--logging_first_step \
--no_remove_unused_columns \
--use_peft \
--lora_r 64 \
--lora_alpha 16 \
--lora_dropout 0.05 \
--lora_target_modules q_proj k_proj v_proj o_proj up_proj gate_proj down_proj \
--attn_implementation flash_attention_2 \
--gradient_checkpointing True
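
# Multi-node variant (a sketch only; NODE_RANK is assumed to be supplied by the
# scheduler, e.g. 0 on the chief and 1 on the worker for a two-node run):
#   accelerate launch --num_processes 8 --num_machines 2 \
#       --machine_rank ${NODE_RANK} \
#       --main_process_ip ${MASTER_ADDR} --main_process_port ${MASTER_PORT} \
#       --mixed_precision 'bf16' \
#       ${Code_path}/llms_dpo.py ...  # same training flags as above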