test(workflow): add workflow for loss test and change trigger event (InternLM#513)

* add workflow for loss test

* change trigger event

* optimize trigger event

---------

Co-authored-by: wangmengke <[email protected]>
kkscilife and wangmengke authored Nov 30, 2023
1 parent 757e19e commit b79d5ea
Showing 2 changed files with 50 additions and 20 deletions.
17 changes: 17 additions & 0 deletions .github/workflows/pr_merged.yaml
@@ -50,3 +50,20 @@ jobs:
         source /mnt/petrelfs/share_data/llm_env/env/llm-flash2.0
         export PYTHONPATH=$PWD:$PYTHONPATH
         srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=internlm-acc-test-${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -N 1 -n 8 --ntasks-per-node=8 --gpus-per-task=1 python ./tests/test_training/train_CI.py --config ./tests/test_training/7B_check_acc.py
+
+  check_loss_when_swapping_micro_num_and_micro_bsz:
+    if: ${{ !cancelled() }}
+    needs: check-requirements
+    runs-on: [t_cluster]
+    timeout-minutes: 40
+    steps:
+    - name: mask env
+      run: |
+        echo "::add-mask::${{env.WORKSPACE_PREFIX}}"
+    - uses: actions/checkout@v3
+
+    - name: loss_tests
+      run: |
+        source /mnt/petrelfs/share_data/llm_env/env/llm-flash2.0
+        export PYTHONPATH=$PWD:$PYTHONPATH
+        srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=internlm-loss-test-${GITHUB_RUN_ID}-${GITHUB_JOB} -N 1 -n 1 --gres=gpu:8 python -m pytest -s ./tests/test_training/test_swap_nb_loss_and_gradnorm.py
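The new job uses a common Actions gating pattern: needs: check-requirements sequences it after the requirements check, while if: ${{ !cancelled() }} replaces the default success-only condition, so the loss test still runs when an earlier job failed but is skipped when the run itself is cancelled. A minimal standalone sketch of this pattern (job and step names here are illustrative, not taken from the repository):

    name: gating-example
    on: [push]

    jobs:
      prerequisite:
        runs-on: ubuntu-latest
        steps:
          - run: echo "may succeed or fail"

      follow_up:
        # The default condition would skip this job if `prerequisite`
        # failed; `!cancelled()` runs it regardless of failure, but
        # still skips it when the whole workflow run is cancelled.
        if: ${{ !cancelled() }}
        needs: prerequisite
        runs-on: ubuntu-latest
        steps:
          - run: echo "runs unless the workflow run was cancelled"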
53 changes: 33 additions & 20 deletions .github/workflows/weekly_test.yaml
@@ -1,102 +1,115 @@
 name: weekly-tests
 on:
-  push:
-    branches:
-      - "main"
-      - "develop"
+  workflow_dispatch:
+  schedule:
+    - cron: '56 18 * * 5'
 env:
   SLURM_PARTITION: llm_s

 jobs:
   training_8GPU:
     runs-on: [t_cluster]
-    timeout-minutes: 5
+    timeout-minutes: 10
     steps:
     - uses: actions/checkout@v3
+      with:
+        ref: ${{ github.event_name == 'schedule' && 'develop' || github.event_name == 'workflow_dispatch' && '' }}

     - name: training_8GPU
       run: |
         source /mnt/petrelfs/share_data/llm_env/env/llm-flash2.0
-        srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -n8 --ntasks-per-node=8 --cpus-per-task=4 --gpus-per-task=1 pytest -s -v --color=yes -m "training_8GPU" ./tests/test_training
+        srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} -n8 --ntasks-per-node=8 --cpus-per-task=4 --gpus-per-task=1 pytest -s -v --color=yes -m "training_8GPU" ./tests/test_training
   training_16GPU_8DP2TP:
     runs-on: [t_cluster]
-    timeout-minutes: 5
+    timeout-minutes: 10
     steps:
     - uses: actions/checkout@v3
+      with:
+        ref: ${{ github.event_name == 'schedule' && 'develop' || github.event_name == 'workflow_dispatch' && '' }}

     - name: training_16GPU_8DP2TP
       run: |
         source /mnt/petrelfs/share_data/llm_env/env/llm-flash2.0
         sed -i 's/^.*tensor=.*/ tensor=2,/' ./configs/7B_sft.py
-        srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -n16 --ntasks-per-node=8 --cpus-per-task=4 --gpus-per-task=1 pytest -s -v --color=yes -m "training_16GPU_8DP2TP" ./tests/test_training
+        srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} -n16 --ntasks-per-node=8 --cpus-per-task=4 --gpus-per-task=1 pytest -s -v --color=yes -m "training_16GPU_8DP2TP" ./tests/test_training
   training_16GPU_8DP2TPSP:
     runs-on: [t_cluster]
-    timeout-minutes: 5
+    timeout-minutes: 10
     steps:
     - uses: actions/checkout@v3
+      with:
+        ref: ${{ github.event_name == 'schedule' && 'develop' || github.event_name == 'workflow_dispatch' && '' }}

     - name: training_16GPU_8DP2TPSP
       run: |
         source /mnt/petrelfs/share_data/llm_env/env/llm-flash2.0
         sed -i 's/^.*tensor=.*/ tensor=2,/' ./configs/7B_sft.py
         sed -i 's/^.*sequence_parallel=.*/ sequence_parallel=True,/' ./configs/7B_sft.py
-        srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -n16 --ntasks-per-node=8 --cpus-per-task=4 --gpus-per-task=1 pytest -s -v --color=yes -m "training_16GPU_8DP2TPSP" ./tests/test_training
+        srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} -n16 --ntasks-per-node=8 --cpus-per-task=4 --gpus-per-task=1 pytest -s -v --color=yes -m "training_16GPU_8DP2TPSP" ./tests/test_training
   training_16GPU_8DP2PP:
     runs-on: [t_cluster]
-    timeout-minutes: 5
+    timeout-minutes: 10
     steps:
     - uses: actions/checkout@v3
+      with:
+        ref: ${{ github.event_name == 'schedule' && 'develop' || github.event_name == 'workflow_dispatch' && '' }}

     - name: training_16GPU_8DP2PP
       run: |
         source /mnt/petrelfs/share_data/llm_env/env/llm-flash2.0
         sed -i 's/^.*pipeline=.*/ pipeline=dict(size=2),/' ./configs/7B_sft.py
-        srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -n16 --ntasks-per-node=8 --cpus-per-task=4 --gpus-per-task=1 pytest -s -v --color=yes -m "training_16GPU_8DP2PP" ./tests/test_training
+        srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} -n16 --ntasks-per-node=8 --cpus-per-task=4 --gpus-per-task=1 pytest -s -v --color=yes -m "training_16GPU_8DP2PP" ./tests/test_training
   training_16GPU_8DP2PP_InterleavedOverlap:
     runs-on: [t_cluster]
-    timeout-minutes: 5
+    timeout-minutes: 10
     steps:
     - uses: actions/checkout@v3
+      with:
+        ref: ${{ github.event_name == 'schedule' && 'develop' || github.event_name == 'workflow_dispatch' && '' }}

     - name: training_16GPU_8DP2PP_InterleavedOverlap
       run: |
         source /mnt/petrelfs/share_data/llm_env/env/llm-flash2.0
         sed -i 's/^.*pipeline=.*/ pipeline=dict(size=2, interleaved_overlap=True),/' ./configs/7B_sft.py
         sed -i 's/^.*num_chunks=.*/ num_chunks=2,/' ./configs/7B_sft.py
-        srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -n16 --ntasks-per-node=8 --cpus-per-task=4 --gpus-per-task=1 pytest -s -v --color=yes -m "training_16GPU_8DP2PP_InterleavedOverlap" ./tests/test_training
+        srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} -n16 --ntasks-per-node=8 --cpus-per-task=4 --gpus-per-task=1 pytest -s -v --color=yes -m "training_16GPU_8DP2PP_InterleavedOverlap" ./tests/test_training
   unit_test_optimizer:
     runs-on: [t_cluster]
-    timeout-minutes: 30
+    timeout-minutes: 35
     steps:
     - uses: actions/checkout@v3
+      with:
+        ref: ${{ github.event_name == 'schedule' && 'develop' || github.event_name == 'workflow_dispatch' && '' }}

     - name: test_optimizer
       run: |
         source /mnt/petrelfs/share_data/llm_env/env/llm-flash2.0
-        srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -N 1 -n 1 --gres=gpu:8 python -m pytest -s ./tests/test_solver/test_optimizer.py
+        srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} -N 1 -n 1 --gres=gpu:8 python -m pytest -s ./tests/test_solver/test_optimizer.py
   unit_test_model:
     runs-on: [t_cluster]
-    timeout-minutes: 5
+    timeout-minutes: 10
     steps:
     - uses: actions/checkout@v3
+      with:
+        ref: ${{ github.event_name == 'schedule' && 'develop' || github.event_name == 'workflow_dispatch' && '' }}

     - name: test_embedding_accuracy
       run: |
         source /mnt/petrelfs/share_data/llm_env/env/llm-flash2.0
-        srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -N 1 -n 1 --gres=gpu:8 python -m pytest -s ./tests/test_model/test_embedding.py
+        srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} -N 1 -n 1 --gres=gpu:8 python -m pytest -s ./tests/test_model/test_embedding.py
     - name: test_model_internlm_accuracy
       run: |
         source /mnt/petrelfs/share_data/llm_env/env/llm-flash2.0
-        srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -N 1 -n 1 --gres=gpu:8 python -m pytest -s ./tests/test_model/test_model_internlm.py
+        srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} -N 1 -n 1 --gres=gpu:8 python -m pytest -s ./tests/test_model/test_model_internlm.py
     - name: test_norm_accuracy
       run: |
         source /mnt/petrelfs/share_data/llm_env/env/llm-flash2.0
-        srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -N 1 -n 1 --gres=gpu:8 python -m pytest -s ./tests/test_model/test_norm.py
+        srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} -N 1 -n 1 --gres=gpu:8 python -m pytest -s ./tests/test_model/test_norm.py
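Two details of the weekly-test changes are worth spelling out. The cron expression '56 18 * * 5' fires at 18:56 UTC every Friday, and workflow_dispatch adds a manual trigger in place of the old push trigger. The checkout ref expression then picks the branch per event: scheduled runs check out 'develop', while manual dispatch yields the empty string, which actions/checkout treats as "use the ref the run was started from". The dropped --quotatype=spot flag appears to be a cluster-specific Slurm option; removing it presumably moves these jobs off preemptible spot quota so weekly runs are not killed mid-test. A commented sketch of the trigger/ref interplay, assuming standard actions/checkout@v3 behavior (the comments are interpretation, not from the diff):

    on:
      workflow_dispatch:         # manual runs from the Actions tab
      schedule:
        - cron: '56 18 * * 5'    # minute 56, hour 18 UTC, Fridays only

    jobs:
      example:
        runs-on: [t_cluster]
        steps:
          - uses: actions/checkout@v3
            with:
              # schedule          -> checks out 'develop'
              # workflow_dispatch -> '' (empty), so checkout falls back
              #                      to the ref the run was dispatched on
              ref: ${{ github.event_name == 'schedule' && 'develop' || github.event_name == 'workflow_dispatch' && '' }}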
