diff --git a/.github/actions/nm-benchmark/action.yml b/.github/actions/nm-benchmark/action.yml
index 97606af72760d..17606e3995e6c 100644
--- a/.github/actions/nm-benchmark/action.yml
+++ b/.github/actions/nm-benchmark/action.yml
@@ -12,7 +12,8 @@ inputs:
     required: true
   venv:
     description: 'name for python virtual environment'
-    required: true
+    required: false
+    default: ""
 runs:
   using: composite
   steps:
diff --git a/.github/actions/nm-install-whl/action.yml b/.github/actions/nm-install-whl/action.yml
index 95d6722bb658f..1e3ce48064cf1 100644
--- a/.github/actions/nm-install-whl/action.yml
+++ b/.github/actions/nm-install-whl/action.yml
@@ -6,7 +6,8 @@ inputs:
     required: true
   venv:
     description: 'name for python virtual environment'
-    required: true
+    required: false
+    default: ""
 runs:
   using: composite
   steps:
diff --git a/.github/actions/nm-lm-eval/action.yml b/.github/actions/nm-lm-eval/action.yml
index f7f0b07fcf080..990290a017cec 100644
--- a/.github/actions/nm-lm-eval/action.yml
+++ b/.github/actions/nm-lm-eval/action.yml
@@ -6,7 +6,8 @@ inputs:
     required: true
   venv:
     description: 'name for python virtual environment'
-    required: true
+    required: false
+    default: ""
   lm_eval_configuration:
     description: 'file containing test configuration'
     required: true
diff --git a/.github/actions/nm-produce-gha-benchmark-json/action.yml b/.github/actions/nm-produce-gha-benchmark-json/action.yml
index 6792a98763b59..6ae61eda15e99 100644
--- a/.github/actions/nm-produce-gha-benchmark-json/action.yml
+++ b/.github/actions/nm-produce-gha-benchmark-json/action.yml
@@ -17,7 +17,8 @@ inputs:
     required: true
   venv:
     description: 'name for python virtual environment'
-    required: true
+    required: false
+    default: ""
 runs:
   using: composite
   steps:
diff --git a/.github/workflows/nm-benchmark.yml b/.github/workflows/nm-benchmark.yml
index 9ab70c84a357c..f64f91ee483ad 100644
--- a/.github/workflows/nm-benchmark.yml
+++ b/.github/workflows/nm-benchmark.yml
@@ -122,7 +122,6 @@ jobs:
       uses: ./.github/actions/nm-install-whl/
       with:
         python: ${{ inputs.python }}
-        venv:
 
     - name: run benchmarks
       uses: ./.github/actions/nm-benchmark/
@@ -130,7 +129,6 @@ jobs:
         benchmark_config_list_file: ${{ inputs.benchmark_config_list_file }}
         output_directory: benchmark-results
         python: ${{ inputs.python }}
-        venv:
 
     - name: store benchmark result artifacts
       if: success()
@@ -171,7 +169,6 @@ jobs:
         # Metrics that we only want to observe are stored here
         observation_metrics_output_file_path: gh-action-benchmark-jsons/observation_metrics.json
         python: ${{ inputs.python }}
-        venv:
 
     - name: set gh action benchmark input artifact name
       id: set_gh_action_benchmark_input_artifact_name
diff --git a/.github/workflows/nm-lm-eval.yml b/.github/workflows/nm-lm-eval.yml
index 4ffeb76ef9f15..9935ec9bbafa1 100644
--- a/.github/workflows/nm-lm-eval.yml
+++ b/.github/workflows/nm-lm-eval.yml
@@ -107,11 +107,9 @@ jobs:
       uses: ./.github/actions/nm-install-whl/
       with:
         python: ${{ inputs.python }}
-        venv:
 
     - name: run lm-eval-accuracy
       uses: ./.github/actions/nm-lm-eval/
       with:
         python: ${{ inputs.python }}
-        venv:
         lm_eval_configuration: ${{ inputs.lm_eval_configuration }}
diff --git a/.github/workflows/nm-test.yml b/.github/workflows/nm-test.yml
index 01d6fa96730d3..379111b345efa 100644
--- a/.github/workflows/nm-test.yml
+++ b/.github/workflows/nm-test.yml
@@ -124,7 +124,6 @@ jobs:
       uses: ./.github/actions/nm-install-whl/
       with:
         python: ${{ inputs.python }}
-        venv:
 
     - name: run buildkite script
       run: |