[Pallas] Set the major-minor layout for inputs and outputs #6617
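# CI workflow: builds PyTorch/XLA, runs the CPU and GPU test suites, and
# publishes docs. It triggers on pull requests and pushes to master and
# release branches (changes under experimental/torch_xla2/ are ignored), and
# can also be started manually via workflow_dispatch. The concurrency group is
# keyed on the workflow, the PR number (or commit SHA), and the event type, so
# superseded runs are cancelled.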

on:
  pull_request:
    branches:
      - master
      - r[0-9]+.[0-9]+
    paths-ignore:
      - 'experimental/torch_xla2/**'
  push:
    branches:
      - master
      - r[0-9]+.[0-9]+
    paths-ignore:
      - 'experimental/torch_xla2/**'
  workflow_dispatch:

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }}
  cancel-in-progress: true
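
# Jobs: "build" produces the Docker image that the test and docs jobs consume
# through the reusable workflows under .github/workflows/.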
jobs:
  build:
    name: "Build XLA"
    uses: ./.github/workflows/_build.yml
    with:
      ecr-docker-image-base: 308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/xla_base
      gcr-docker-image: gcr.io/tpu-pytorch/xla_base:dev-3.8_cuda_12.1
      cuda: 1
    secrets:
      gcloud-service-key: ${{ secrets.GCLOUD_SERVICE_KEY }}
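
  # CPU tests: reuses _test.yml with the image produced by the build job;
  # XRT is disabled and coverage collection is off.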
  test-cpu:
    name: "CPU tests"
    uses: ./.github/workflows/_test.yml
    needs: build
    with:
      docker-image: ${{ needs.build.outputs.docker-image }}
      timeout-minutes: 120
      collect-coverage: false
      disable-xrt: 1
    secrets:
      gcloud-service-key: ${{ secrets.GCLOUD_SERVICE_KEY }}
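
  # GPU tests: same reusable test workflow, run on an NVIDIA GPU runner with a
  # longer timeout.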
  test-cuda:
    name: "GPU tests"
    uses: ./.github/workflows/_test.yml
    needs: build
    with:
      docker-image: ${{ needs.build.outputs.docker-image }}
      runner: linux.8xlarge.nvidia.gpu
      timeout-minutes: 300
      collect-coverage: false  # TODO(yeounoh) separate from CPU coverage metrics
      disable-xrt: 1
    secrets:
      gcloud-service-key: ${{ secrets.GCLOUD_SERVICE_KEY }}
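
  # Docs: built and published only for pushes to master or release tags.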
  push-docs:
    name: "Build & publish docs"
    if: github.event_name == 'push' && (github.event.ref == 'refs/heads/master' || startsWith(github.event.ref, 'refs/tags/r'))
    uses: ./.github/workflows/_docs.yml
    needs: build
    with:
      docker-image: ${{ needs.build.outputs.docker-image }}
    secrets:
      torchxla-bot-token: ${{ secrets.TORCH_XLA_BOT_TOKEN }}