(feat): read_elem_as_dask method #1635
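The pull request above adds a `read_elem_as_dask` method which, judging by the name, reads a stored element lazily as a dask array. For orientation only, a hedged sketch of how such a method might be used follows; the import path, signature, and file name are assumptions, not details taken from the PR diff.

import h5py
from anndata.experimental import read_elem_as_dask  # assumed import location

with h5py.File("adata.h5ad", "r") as f:       # hypothetical on-disk AnnData store
    X = read_elem_as_dask(f["X"])             # wrap the element as a lazy dask array
    print(X.chunks)                           # nothing has been loaded into memory yet
    col_means = X.mean(axis=0).compute()      # computation pulls the data chunk by chunk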

Workflow file for this run

name: AWS GPU
on:
  push:
    branches: [main, "[0-9]+.[0-9]+.x"]
  pull_request:
    types:
      - labeled
      - opened
      - synchronize
# Cancel in-progress runs when new commits are pushed
# https://stackoverflow.com/questions/66335225/how-to-cancel-previous-runs-in-the-pr-when-you-push-new-commitsupdate-the-curre
concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true
# There are two jobs:
# 1. `check` determines whether the second job (`test`) will be run (through a job dependency).
# 2. `test` runs on an AWS runner and executes the GPU tests.
jobs:
  # If the `skip-gpu-ci` label is set, this job is skipped, and consequently so is the `test` job.
  # If the `run-gpu-ci` label is set or we reacted to a `push` event, this job succeeds (and `test` is run).
  # If neither label is set, this job fails, `test` is skipped, and the whole workflow fails.
  check:
    name: "Triage: Check if GPU tests are allowed to run"
    if: (!contains(github.event.pull_request.labels.*.name, 'skip-gpu-ci'))
    runs-on: ubuntu-latest
    steps:
      - uses: flying-sheep/check@v1
        with:
          success: ${{ github.event_name == 'push' || contains(github.event.pull_request.labels.*.name, 'run-gpu-ci') }}
  # If `check` was neither skipped nor failed, start an AWS runner and run the GPU tests on it.
  test:
    name: GPU Tests
    needs: check
    runs-on: "cirun-aws-gpu--${{ github.run_id }}"
    # Set a timeout of 30 minutes, as the AWS runner costs money.
    # At the time of writing, a typical run takes about 5 minutes.
    timeout-minutes: 30
    defaults:
      run:
        shell: bash -el {0}
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Nvidia SMI sanity check
        run: nvidia-smi
      - uses: mamba-org/setup-micromamba@v1
        with:
          micromamba-version: "1.5.6-0"
          environment-file: ci/gpu_ci.yml
          post-cleanup: "all"
      - name: Install AnnData
        run: uv pip install -e .[dev,test,gpu]
      - name: Env list
        run: |
          micromamba list
          pip list
      - name: Run test
        run: pytest -m gpu --cov --cov-report=xml --cov-context=test -n 4
      - uses: codecov/codecov-action@v3
        with:
          flags: gpu-tests
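
The `Run test` step above collects only tests marked `gpu`. As an illustration of what `pytest -m gpu` selects, here is a minimal hedged sketch; the test name and body are invented, and the `gpu` marker is assumed to be registered in the project's pytest configuration.

import pytest

cupy = pytest.importorskip("cupy")  # skip the whole module when CuPy is unavailable


@pytest.mark.gpu  # the `-m gpu` filter in the workflow selects tests with this marker
def test_gpu_matrix_sum():
    # Allocate a small matrix on the GPU and compare its sum against the CPU result.
    x_gpu = cupy.random.rand(10, 5)
    x_cpu = cupy.asnumpy(x_gpu)
    assert float(x_gpu.sum()) == pytest.approx(x_cpu.sum())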