From 0053719ca24c1b207fa3a8e5118ab9f186535293 Mon Sep 17 00:00:00 2001
From: derekk-nm
Date: Mon, 15 Jul 2024 17:06:18 +0000
Subject: [PATCH] remove use of "venv" input to actions

Per discussion in the PR and at standup: since the setup-python action
already runs in an earlier step of the workflows that call these actions,
we don't need to manage any python virtual environments here.

---
 .github/actions/nm-benchmark/action.yml   | 12 ------------
 .github/actions/nm-install-whl/action.yml | 13 -------------
 .github/actions/nm-lm-eval/action.yml     | 13 -------------
 .github/workflows/nm-benchmark.yml        |  3 ---
 .github/workflows/nm-lm-eval.yml          |  3 ---
 .github/workflows/nm-test.yml             |  2 --
 6 files changed, 46 deletions(-)

diff --git a/.github/actions/nm-benchmark/action.yml b/.github/actions/nm-benchmark/action.yml
index 17606e3995e6c..b6beba8868ea0 100644
--- a/.github/actions/nm-benchmark/action.yml
+++ b/.github/actions/nm-benchmark/action.yml
@@ -7,13 +7,6 @@ inputs:
   output_directory:
     description: 'output directory to store the benchmark results'
     required: true
-  python:
-    description: 'python version, e.g. 3.10.12'
-    required: true
-  venv:
-    description: 'name for python virtual environment'
-    required: false
-    default: ""
 runs:
   using: composite
   steps:
@@ -23,11 +16,6 @@ runs:
       # move source directories
       mv vllm vllm-ignore || echo "no 'vllm' folder to move"
       mv csrc csrc-ignore || echo "no 'csrc' folder to move"
-      if [ ! -z "${{ inputs.venv }}" ]; then
-        COMMIT=${{ github.sha }}
-        VENV="${{ inputs.venv }}-${COMMIT:0:7}"
-        source $(pyenv root)/versions/${{ inputs.python }}/envs/${VENV}/bin/activate
-      fi
       pip3 install -r neuralmagic/benchmarks/requirements-benchmark.txt
       SUCCESS=0
       .github/scripts/nm-run-benchmarks.sh ${{ inputs.benchmark_config_list_file }} ${{ inputs.output_directory }} || SUCCESS=$?
diff --git a/.github/actions/nm-install-whl/action.yml b/.github/actions/nm-install-whl/action.yml
index 1e3ce48064cf1..c843844ef90d6 100644
--- a/.github/actions/nm-install-whl/action.yml
+++ b/.github/actions/nm-install-whl/action.yml
@@ -1,13 +1,5 @@
 name: install whl
 description: 'installs found whl based on python version into specified venv'
-inputs:
-  python:
-    description: 'python version, e.g. 3.10.12'
-    required: true
-  venv:
-    description: 'name for python virtual environment'
-    required: false
-    default: ""
 runs:
   using: composite
   steps:
@@ -17,11 +9,6 @@ runs:
       mv vllm vllm-ignore
       mv csrc csrc-ignore
       # activate and install
-      if [ ! -z "${{ inputs.venv }}" ]; then
-        COMMIT=${{ github.sha }}
-        VENV="${{ inputs.venv }}-${COMMIT:0:7}"
-        source $(pyenv root)/versions/${{ inputs.python }}/envs/${VENV}/bin/activate
-      fi
       pip3 install -r requirements-dev.txt
       WHL=$(find . -type f -iname "nm_vllm*.whl")
       WHL_BASENAME=$(basename ${WHL})
diff --git a/.github/actions/nm-lm-eval/action.yml b/.github/actions/nm-lm-eval/action.yml
index 990290a017cec..c9b60c5c7e361 100644
--- a/.github/actions/nm-lm-eval/action.yml
+++ b/.github/actions/nm-lm-eval/action.yml
@@ -1,13 +1,6 @@
 name: run lm-eval accuracy test
 description: 'run lm-eval accuracy test'
 inputs:
-  python:
-    description: 'python version, e.g. 3.10.12'
-    required: true
-  venv:
-    description: 'name for python virtual environment'
-    required: false
-    default: ""
   lm_eval_configuration:
     description: 'file containing test configuration'
     required: true
@@ -16,12 +9,6 @@ runs:
   steps:
   - id: lm-eval
     run: |
-      if [ ! -z "${{ inputs.venv }}" ]; then
-        COMMIT=${{ github.sha }}
-        VENV="${{ inputs.venv }}-${COMMIT:0:7}"
-        source $(pyenv root)/versions/${{ inputs.python }}/envs/${VENV}/bin/activate
-      fi
-
       pip3 install git+https://github.com/EleutherAI/lm-evaluation-harness.git@262f879a06aa5de869e5dd951d0ff2cf2f9ba380
       pip3 install pytest openai==1.3.9
 
diff --git a/.github/workflows/nm-benchmark.yml b/.github/workflows/nm-benchmark.yml
index f64f91ee483ad..e25f5e5a920cd 100644
--- a/.github/workflows/nm-benchmark.yml
+++ b/.github/workflows/nm-benchmark.yml
@@ -120,15 +120,12 @@ jobs:
       - name: install whl
         id: install_whl
         uses: ./.github/actions/nm-install-whl/
-        with:
-          python: ${{ inputs.python }}
 
       - name: run benchmarks
         uses: ./.github/actions/nm-benchmark/
         with:
           benchmark_config_list_file: ${{ inputs.benchmark_config_list_file }}
           output_directory: benchmark-results
-          python: ${{ inputs.python }}
 
       - name: store benchmark result artifacts
         if: success()
diff --git a/.github/workflows/nm-lm-eval.yml b/.github/workflows/nm-lm-eval.yml
index 9935ec9bbafa1..fede1eeb2719b 100644
--- a/.github/workflows/nm-lm-eval.yml
+++ b/.github/workflows/nm-lm-eval.yml
@@ -105,11 +105,8 @@ jobs:
       - name: install whl
         id: install_whl
         uses: ./.github/actions/nm-install-whl/
-        with:
-          python: ${{ inputs.python }}
 
       - name: run lm-eval-accuracy
         uses: ./.github/actions/nm-lm-eval/
         with:
-          python: ${{ inputs.python }}
           lm_eval_configuration: ${{ inputs.lm_eval_configuration }}
diff --git a/.github/workflows/nm-test.yml b/.github/workflows/nm-test.yml
index 379111b345efa..faf7d38785232 100644
--- a/.github/workflows/nm-test.yml
+++ b/.github/workflows/nm-test.yml
@@ -122,8 +122,6 @@ jobs:
 
       - name: install whl
         uses: ./.github/actions/nm-install-whl/
-        with:
-          python: ${{ inputs.python }}
 
       - name: run buildkite script
         run: |