diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml
index a5590f0a..5373da9a 100644
--- a/.github/workflows/rust.yml
+++ b/.github/workflows/rust.yml
@@ -5,6 +5,9 @@ on:
   pull_request:
     types: [opened, synchronize, reopened, ready_for_review]
     branches: [main, dev]
+  push:
+    branches:
+      - "ci-gpu"
 
 concurrency:
   group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
@@ -80,13 +83,11 @@ jobs:
   licenses-audits:
     uses: argumentcomputer/ci-workflows/.github/workflows/licenses-audits.yml@main
 
-  # Runs the test suite on a self-hosted GPU machine with CUDA enabled
+  # Runs the test suite on a GPU machine with CUDA enabled
   test-cuda:
     name: Rust tests on CUDA
-    runs-on: self-hosted
+    runs-on: gpu-ci-t4
     env:
-      NVIDIA_VISIBLE_DEVICES: all
-      NVIDIA_DRIVER_CAPABILITITES: compute,utility
       EC_GPU_FRAMEWORK: cuda
     steps:
       - uses: actions/checkout@v4
@@ -99,6 +100,18 @@
       - uses: Swatinem/rust-cache@v2
       # Check we have access to the machine's Nvidia drivers
       - run: nvidia-smi
+      - name: Install CUDA
+        run: |
+          wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-keyring_1.1-1_all.deb
+          sudo dpkg -i cuda-keyring_1.1-1_all.deb
+          sudo apt-get update
+          sudo apt-get -y install cuda-toolkit-12-4
+          echo "PATH=/usr/local/cuda/bin:${{ env.PATH }}" >> $GITHUB_ENV
+      - run: nvcc --version
+      - name: Install deps
+        run: |
+          sudo apt-get update
+          sudo apt-get install -y build-essential
       # The `compute`/`sm` number corresponds to the Nvidia GPU architecture
       # In this case, the self-hosted machine uses the Ampere architecture, but we want this to be configurable
       # See https://arnon.dk/matching-sm-architectures-arch-and-gencode-for-various-nvidia-cards/
@@ -114,13 +127,11 @@
       run: |
         cargo nextest run --release --no-default-features --features cuda,pasta,bls,arity2,arity4,arity8,arity11,arity16,arity24,arity36
 
-  # Runs the test suite on a self-hosted GPU machine with CUDA and OpenCL enabled (that is using the OpenCL backend for NVIDIA GPUs)
+  # Runs the test suite on a GPU machine with CUDA and OpenCL enabled (that is using the OpenCL backend for NVIDIA GPUs)
   test-opencl:
     name: Rust tests on OpenCL
-    runs-on: self-hosted
+    runs-on: gpu-ci-t4
     env:
-      NVIDIA_VISIBLE_DEVICES: all
-      NVIDIA_DRIVER_CAPABILITITES: compute,utility
       EC_GPU_FRAMEWORK: opencl
     steps:
       - uses: actions/checkout@v4
@@ -131,12 +142,20 @@
       - uses: dtolnay/rust-toolchain@stable
       - uses: taiki-e/install-action@nextest
       - uses: Swatinem/rust-cache@v2
-      - name: Install GPU deps
-        run: |
-          apt-get update
-          apt-get -y install ocl-icd-opencl-dev
       # Check we have access to the machine's Nvidia drivers
       - run: nvidia-smi
+      - name: Install CUDA
+        run: |
+          wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-keyring_1.1-1_all.deb
+          sudo dpkg -i cuda-keyring_1.1-1_all.deb
+          sudo apt-get update
+          sudo apt-get -y install cuda-toolkit-12-4
+          echo "PATH=/usr/local/cuda/bin:${{ env.PATH }}" >> $GITHUB_ENV
+      - run: nvcc --version
+      - name: Install deps
+        run: |
+          sudo apt-get update
+          sudo apt-get -y install build-essential ocl-icd-opencl-dev
       # The `compute`/`sm` number corresponds to the Nvidia GPU architecture
       # In this case, the self-hosted machine uses the Ampere architecture, but we want this to be configurable
       # See https://arnon.dk/matching-sm-architectures-arch-and-gencode-for-various-nvidia-cards/
diff --git a/deny.toml b/deny.toml
index 22b076c7..8e1253b1 100644
--- a/deny.toml
+++ b/deny.toml
@@ -109,6 +109,7 @@ allow = [
     "CC0-1.0",
     "Apache-2.0",
     "Unicode-DFS-2016",
+    "Unicode-3.0"
 ]
 # List of explicitly disallowed licenses
 # See https://spdx.org/licenses/ for list of possible licenses
diff --git a/rust-toolchain.toml b/rust-toolchain.toml
index 30d2b8f4..85950331 100644
--- a/rust-toolchain.toml
+++ b/rust-toolchain.toml
@@ -1,4 +1,4 @@
 [toolchain]
 # The default profile includes rustc, rust-std, cargo, rust-docs, rustfmt and clippy.
 profile = "default"
-channel = "1.76"
+channel = "1.78"