forked from ROCm/transformers
-
Notifications
You must be signed in to change notification settings - Fork 0
144 lines (111 loc) · 7.22 KB
/
fork-maintenance.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
# Fork-maintenance workflow: periodically syncs this ROCm fork with upstream
# huggingface/transformers by delegating to the shared reusable workflow in
# Cemberk/fork-maintenance-system (pinned to the `artifacts` ref).
name: Run Scheduled Events Docker

permissions:
  actions: write
  contents: write
  issues: write
  pull-requests: write

# Triggered manually, or on a schedule: 00:00 UTC on the 10th of every month.
on:
  workflow_dispatch:
  schedule:
    - cron: '0 0 10 * *'

jobs:
  run-scheduled-events:
    uses: Cemberk/fork-maintenance-system/.github/workflows/fork-maintenance-action.yml@artifacts
    with:
      # Target AMD GPU architecture (MI200-class).
      platform: 'gfx90a'
      upstream_repo: 'https://github.com/huggingface/transformers'
      pr_branch_prefix: 'scheduled-merge'
      # Environment setup run inside the container before testing: purge the
      # preinstalled numpy, drop torchaudio from the example requirements
      # (restoring the file afterwards so the tree stays clean), then install
      # test dependencies and this repo in editable mode.
      requirements_command: |
        rm -rf $(pip show numpy | grep Location: | awk '{print $2}')/numpy* &&
        sudo sed -i 's/torchaudio//g' examples/pytorch/_tests_requirements.txt &&
        pip install -r examples/pytorch/_tests_requirements.txt &&
        git restore examples/pytorch/_tests_requirements.txt &&
        pip install --no-cache-dir GPUtil azureml azureml-core tokenizers ninja cerberus sympy sacremoses sacrebleu==1.5.1 sentencepiece scipy scikit-learn urllib3 && pip install huggingface_hub datasets &&
        pip install parameterized &&
        pip install -e .
      # Selects the first 5 test folders (model tests first, then other test
      # dirs), runs pytest per folder with report generation, then dumps every
      # stats.txt report found under reports/. Folded scalar: YAML joins the
      # lines with single spaces into one shell command line.
      # NOTE(review): ${machine_type} is expected to be exported by the called
      # workflow's container environment — not defined here; confirm upstream.
      unit_test_command: >-
        cd tests; folders=$(python3 -c 'import os; tests = os.getcwd();
        models = "models"; model_tests = os.listdir(os.path.join(tests, models));
        d1 = sorted([d for d in os.listdir(tests) if os.path.isdir(d) and d != models]);
        d2 = sorted([os.path.join(models, x) for x in os.listdir(os.path.join(tests, models))
        if os.path.isdir(os.path.join(models, x))]); d = d2 + d1;
        print(" ".join(d[:5]))'); cd ..;
        for folder in ${folders[@]}; do pytest tests/${folder} -v
        --make-reports=huggingface_unit_tests_${machine_type}_run_models_gpu_${folder}
        -rfEs --continue-on-collection-errors -m "not not_device_test"
        -p no:cacheprovider; done;
        allstats=$(find reports -name stats.txt);
        for stat in ${allstats[@]}; do echo $stat; cat $stat; done
      # NOTE(review): this echoes the benchmark command (the \" are literal
      # backslash-quote in the shell) instead of executing it — presumably a
      # deliberate dry-run placeholder; confirm before "fixing".
      performance_test_command: 'echo \"python examples/pytorch/language-modeling/run_mlm.py --model_name_or_path bert-base-uncased --dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 --do_train --do_eval --output_dir /tmp/test-mlm --per_device_train_batch_size 8 --per_device_eval_batch_size 8 --max_steps 500\"'
      docker_image: 'rocm/pytorch:latest'
      # ROCm device passthrough plus large shared memory and host networking.
      docker_options: '--device=/dev/kfd --device=/dev/dri --group-add video --shm-size 16G --network=host'
    secrets:
      GIT_TOKEN: ${{ secrets.CRED_TOKEN }}
      schedule_json: ${{ secrets.SCHEDULE_CONFIG }}