From 9dc4ce9ea73dd46c4f63a3182a8cbd1cef1b886f Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Tue, 31 Oct 2023 11:59:21 +0100 Subject: [PATCH] Disable CI runner check (#27170) Disable runner check Co-authored-by: ydshieh --- .github/workflows/self-nightly-scheduled.yml | 34 ------------------ .github/workflows/self-past.yml | 34 ------------------ .github/workflows/self-push.yml | 34 ------------------ .github/workflows/self-scheduled.yml | 36 -------------------- utils/notification_service.py | 11 +++--- 5 files changed, 7 insertions(+), 142 deletions(-) diff --git a/.github/workflows/self-nightly-scheduled.yml b/.github/workflows/self-nightly-scheduled.yml index 713e004d8e5881..f5d96828183e14 100644 --- a/.github/workflows/self-nightly-scheduled.yml +++ b/.github/workflows/self-nightly-scheduled.yml @@ -21,36 +21,8 @@ env: RUN_PT_TF_CROSS_TESTS: 1 jobs: - check_runner_status: - name: Check Runner Status - runs-on: ubuntu-latest - steps: - - name: Checkout transformers - uses: actions/checkout@v3 - with: - fetch-depth: 2 - - - name: Check Runner Status - run: python utils/check_self_hosted_runner.py --target_runners single-gpu-past-ci-runner-docker,multi-gpu-past-ci-runner-docker --token ${{ secrets.ACCESS_REPO_INFO_TOKEN }} - - check_runners: - name: Check Runners - needs: check_runner_status - strategy: - matrix: - machine_type: [single-gpu, multi-gpu] - runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, past-ci] - container: - image: huggingface/transformers-all-latest-torch-nightly-gpu - options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ - steps: - - name: NVIDIA-SMI - run: | - nvidia-smi - setup: name: Setup - needs: check_runners strategy: matrix: machine_type: [single-gpu, multi-gpu] @@ -276,8 +248,6 @@ jobs: runs-on: ubuntu-latest if: always() needs: [ - check_runner_status, - check_runners, setup, run_tests_single_gpu, run_tests_multi_gpu, @@ -288,8 +258,6 @@ jobs: 
shell: bash # For the meaning of these environment variables, see the job `Setup` run: | - echo "Runner availability: ${{ needs.check_runner_status.result }}" - echo "Runner status: ${{ needs.check_runners.result }}" echo "Setup status: ${{ needs.setup.result }}" - uses: actions/checkout@v3 @@ -303,8 +271,6 @@ jobs: CI_SLACK_REPORT_CHANNEL_ID: ${{ secrets.CI_SLACK_CHANNEL_ID_PAST_FUTURE }} ACCESS_REPO_INFO_TOKEN: ${{ secrets.ACCESS_REPO_INFO_TOKEN }} CI_EVENT: Nightly CI - RUNNER_STATUS: ${{ needs.check_runner_status.result }} - RUNNER_ENV_STATUS: ${{ needs.check_runners.result }} SETUP_STATUS: ${{ needs.setup.result }} # We pass `needs.setup.outputs.matrix` as the argument. A processing in `notification_service.py` to change # `models/bert` to `models_bert` is required, as the artifact names use `_` instead of `/`. diff --git a/.github/workflows/self-past.yml b/.github/workflows/self-past.yml index 71f904c831e944..ad624fb2581b07 100644 --- a/.github/workflows/self-past.yml +++ b/.github/workflows/self-past.yml @@ -32,36 +32,8 @@ env: RUN_PT_TF_CROSS_TESTS: 1 jobs: - check_runner_status: - name: Check Runner Status - runs-on: ubuntu-latest - steps: - - name: Checkout transformers - uses: actions/checkout@v3 - with: - fetch-depth: 2 - - - name: Check Runner Status - run: python utils/check_self_hosted_runner.py --target_runners single-gpu-past-ci-runner-docker,multi-gpu-past-ci-runner-docker --token ${{ secrets.ACCESS_REPO_INFO_TOKEN }} - - check_runners: - name: Check Runners - needs: check_runner_status - strategy: - matrix: - machine_type: [single-gpu, multi-gpu] - runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, past-ci] - container: - image: huggingface/transformers-${{ inputs.framework }}-past-${{ inputs.version }}-gpu - options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ - steps: - - name: NVIDIA-SMI - run: | - nvidia-smi - setup: name: Setup - needs: check_runners strategy: matrix: machine_type: [single-gpu, 
multi-gpu] @@ -319,8 +291,6 @@ jobs: runs-on: ubuntu-latest if: always() needs: [ - check_runner_status, - check_runners, setup, run_tests_single_gpu, run_tests_multi_gpu, @@ -331,8 +301,6 @@ jobs: shell: bash # For the meaning of these environment variables, see the job `Setup` run: | - echo "Runner availability: ${{ needs.check_runner_status.result }}" - echo "Runner status: ${{ needs.check_runners.result }}" echo "Setup status: ${{ needs.setup.result }}" - uses: actions/checkout@v3 @@ -351,8 +319,6 @@ jobs: CI_SLACK_REPORT_CHANNEL_ID: ${{ secrets.CI_SLACK_CHANNEL_ID_PAST_FUTURE }} ACCESS_REPO_INFO_TOKEN: ${{ secrets.ACCESS_REPO_INFO_TOKEN }} CI_EVENT: Past CI - ${{ inputs.framework }}-${{ inputs.version }} - RUNNER_STATUS: ${{ needs.check_runner_status.result }} - RUNNER_ENV_STATUS: ${{ needs.check_runners.result }} SETUP_STATUS: ${{ needs.setup.result }} # We pass `needs.setup.outputs.matrix` as the argument. A processing in `notification_service.py` to change # `models/bert` to `models_bert` is required, as the artifact names use `_` instead of `/`. 
diff --git a/.github/workflows/self-push.yml b/.github/workflows/self-push.yml index e4b1b3b4b235d1..15035704d0aed7 100644 --- a/.github/workflows/self-push.yml +++ b/.github/workflows/self-push.yml @@ -27,36 +27,8 @@ env: RUN_PT_TF_CROSS_TESTS: 1 jobs: - check_runner_status: - name: Check Runner Status - runs-on: ubuntu-latest - steps: - - name: Checkout transformers - uses: actions/checkout@v3 - with: - fetch-depth: 2 - - - name: Check Runner Status - run: python utils/check_self_hosted_runner.py --target_runners single-gpu-ci-runner-docker,multi-gpu-ci-runner-docker --token ${{ secrets.ACCESS_REPO_INFO_TOKEN }} - - check_runners: - name: Check Runners - needs: check_runner_status - strategy: - matrix: - machine_type: [single-gpu, multi-gpu] - runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, push-ci] - container: - image: huggingface/transformers-all-latest-gpu-push-ci - options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ - steps: - - name: NVIDIA-SMI - run: | - nvidia-smi - setup: name: Setup - needs: check_runners strategy: matrix: machine_type: [single-gpu, multi-gpu] @@ -521,8 +493,6 @@ jobs: runs-on: ubuntu-latest if: always() needs: [ - check_runner_status, - check_runners, setup, run_tests_single_gpu, run_tests_multi_gpu, @@ -534,9 +504,7 @@ jobs: shell: bash # For the meaning of these environment variables, see the job `Setup` run: | - echo "Runner availability: ${{ needs.check_runner_status.result }}" echo "Setup status: ${{ needs.setup.result }}" - echo "Runner status: ${{ needs.check_runners.result }}" # Necessary to get the correct branch name and commit SHA for `workflow_run` event # We also take into account the `push` event (we might want to test some changes in a branch) @@ -589,8 +557,6 @@ jobs: CI_TITLE_PUSH: ${{ github.event.head_commit.message }} CI_TITLE_WORKFLOW_RUN: ${{ github.event.workflow_run.head_commit.message }} CI_SHA: ${{ env.CI_SHA }} - RUNNER_STATUS: ${{ 
needs.check_runner_status.result }} - RUNNER_ENV_STATUS: ${{ needs.check_runners.result }} SETUP_STATUS: ${{ needs.setup.result }} # We pass `needs.setup.outputs.matrix` as the argument. A processing in `notification_service.py` to change diff --git a/.github/workflows/self-scheduled.yml b/.github/workflows/self-scheduled.yml index 2bd6bbade1cbe2..09ea3af0d44c39 100644 --- a/.github/workflows/self-scheduled.yml +++ b/.github/workflows/self-scheduled.yml @@ -25,36 +25,8 @@ env: RUN_PT_TF_CROSS_TESTS: 1 jobs: - check_runner_status: - name: Check Runner Status - runs-on: ubuntu-latest - steps: - - name: Checkout transformers - uses: actions/checkout@v3 - with: - fetch-depth: 2 - - - name: Check Runner Status - run: python utils/check_self_hosted_runner.py --target_runners single-gpu-scheduled-ci-runner-docker,multi-gpu-scheduled-ci-runner-docker --token ${{ secrets.ACCESS_REPO_INFO_TOKEN }} - - check_runners: - name: Check Runners - needs: check_runner_status - strategy: - matrix: - machine_type: [single-gpu, multi-gpu] - runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, daily-ci] - container: - image: huggingface/transformers-all-latest-gpu - options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ - steps: - - name: NVIDIA-SMI - run: | - nvidia-smi - setup: name: Setup - needs: check_runners strategy: matrix: machine_type: [single-gpu, multi-gpu] @@ -430,8 +402,6 @@ jobs: runs-on: ubuntu-latest if: always() needs: [ - check_runner_status, - check_runners, setup, run_tests_single_gpu, run_tests_multi_gpu, @@ -480,8 +450,6 @@ jobs: runs-on: ubuntu-latest if: always() needs: [ - check_runner_status, - check_runners, setup, run_tests_single_gpu, run_tests_multi_gpu, @@ -496,8 +464,6 @@ jobs: shell: bash # For the meaning of these environment variables, see the job `Setup` run: | - echo "Runner availability: ${{ needs.check_runner_status.result }}" - echo "Runner status: ${{ needs.check_runners.result }}" echo "Setup status: ${{ 
needs.setup.result }}" - uses: actions/checkout@v3 @@ -513,8 +479,6 @@ jobs: CI_EVENT: scheduled CI_SHA: ${{ github.sha }} CI_WORKFLOW_REF: ${{ github.workflow_ref }} - RUNNER_STATUS: ${{ needs.check_runner_status.result }} - RUNNER_ENV_STATUS: ${{ needs.check_runners.result }} SETUP_STATUS: ${{ needs.setup.result }} # We pass `needs.setup.outputs.matrix` as the argument. A processing in `notification_service.py` to change # `models/bert` to `models_bert` is required, as the artifact names use `_` instead of `/`. diff --git a/utils/notification_service.py b/utils/notification_service.py index 610d597d2307fa..4df3b299448475 100644 --- a/utils/notification_service.py +++ b/utils/notification_service.py @@ -769,12 +769,15 @@ def prepare_reports(title, header, reports, to_truncate=True): if __name__ == "__main__": - runner_status = os.environ.get("RUNNER_STATUS") - runner_env_status = os.environ.get("RUNNER_ENV_STATUS") + # runner_status = os.environ.get("RUNNER_STATUS") + # runner_env_status = os.environ.get("RUNNER_ENV_STATUS") setup_status = os.environ.get("SETUP_STATUS") - runner_not_available = True if runner_status is not None and runner_status != "success" else False - runner_failed = True if runner_env_status is not None and runner_env_status != "success" else False + # runner_not_available = True if runner_status is not None and runner_status != "success" else False + # runner_failed = True if runner_env_status is not None and runner_env_status != "success" else False + # Let's keep the lines regarding runners' status (we might be able to use them again in the future) + runner_not_available = False + runner_failed = False setup_failed = True if setup_status is not None and setup_status != "success" else False org = "huggingface"