From 41fd9a5c0ee4019b9345fa1521e27925f811442c Mon Sep 17 00:00:00 2001
From: iefgnoix
Date: Wed, 27 Mar 2024 21:20:56 +0000
Subject: [PATCH] Move CUDA tests into run_xla_op_tests3 so they only fail in the op3 test.

---
 .circleci/common.sh | 38 +-------------------------------------
 test/run_tests.sh   | 36 ++++++++++++++++++++++++++++++++++++
 2 files changed, 37 insertions(+), 37 deletions(-)

diff --git a/.circleci/common.sh b/.circleci/common.sh
index 74d6bb922f2..ce56c947971 100755
--- a/.circleci/common.sh
+++ b/.circleci/common.sh
@@ -132,42 +132,6 @@ function run_torch_xla_python_tests() {
     chmod -R 755 ~/htmlcov
   else
     ./test/run_tests.sh
-
-    # CUDA tests
-    if [ -x "$(command -v nvidia-smi)" ]; then
-      # single-host-single-process
-      PJRT_DEVICE=CUDA python3 test/test_train_mp_imagenet.py --fake_data --batch_size=16 --num_epochs=1 --num_cores=1 --num_steps=25 --model=resnet18
-      PJRT_DEVICE=CUDA torchrun --nnodes=1 --node_rank=0 --nproc_per_node=1 test/test_train_mp_imagenet.py --fake_data --pjrt_distributed --batch_size=16 --num_epochs=1 --num_steps=25 --model=resnet18
-
-      # single-host-multi-process
-      num_devices=$(nvidia-smi --list-gpus | wc -l)
-      PJRT_DEVICE=CUDA GPU_NUM_DEVICES=$GPU_NUM_DEVICES python3 test/test_train_mp_imagenet.py --fake_data --batch_size=16 --num_epochs=1 --num_steps=25 --model=resnet18
-      PJRT_DEVICE=CUDA torchrun --nnodes=1 --node_rank=0 --nproc_per_node=$num_devices test/test_train_mp_imagenet.py --fake_data --pjrt_distributed --batch_size=16 --num_epochs=1 --num_steps=25 --model=resnet18
-
-      # single-host-SPMD
-      # TODO: Reduce BS due to GPU test OOM in CI after pin update to 03/05/2024 (#6677)
-      XLA_USE_SPMD=1 PJRT_DEVICE=CUDA torchrun --nnodes=1 --node_rank=0 --nproc_per_node=1 test/spmd/test_train_spmd_imagenet.py --fake_data --batch_size 8 --model=resnet50 --sharding=batch --num_epochs=1 --num_steps=25 --model=resnet18
-
-      # TODO: Reduce BS due to GPU test OOM in CI after pin update to 03/05/2024 (#6677)
-      PJRT_DEVICE=CUDA python test/test_train_mp_imagenet_fsdp.py --fake_data --use_nested_fsdp --use_small_fake_sample --num_epochs=1 --batch_size 32 --test_set_batch_size 32
-      # TODO: Reduce BS due to GPU test OOM in CI after pin update to 03/05/2024 (#6677)
-      PJRT_DEVICE=CUDA python test/test_train_mp_imagenet_fsdp.py --fake_data --auto_wrap_policy type_based --use_small_fake_sample --num_epochs=1 --batch_size 32 --test_set_batch_size 32
-      XLA_DISABLE_FUNCTIONALIZATION=1 PJRT_DEVICE=CUDA python test/test_train_mp_imagenet_fsdp.py --fake_data --use_nested_fsdp --use_small_fake_sample --num_epochs=1
-      # Syncfree SGD optimizer tests
-      if [ -d ./torch_xla/amp/syncfree ]; then
-        echo "Running Syncfree Optimizer Test"
-        PJRT_DEVICE=CUDA python test/test_syncfree_optimizers.py
-
-        # Following test scripts are mainly useful for
-        # performance evaluation & comparison among different
-        # amp optimizers.
-        echo "Running ImageNet Test"
-        python test/test_train_mp_imagenet_amp.py --fake_data --num_epochs=1
-
-        echo "Running MNIST Test"
-        python test/test_train_mp_mnist_amp.py --fake_data --num_epochs=1
-      fi
-    fi
   fi
   popd
 }
@@ -240,7 +204,7 @@ function run_torch_xla_tests() {
       run_torch_xla_benchmark_tests $XLA_DIR
     else
       # run tests separately.
- if [[ "$RUN_PYTHON_TESTS" == "python_tests" ]]; then + if [[ "$RUN_PYTHON_TESTS" == "python_tests" && ]]; then run_torch_xla_python_tests $PYTORCH_DIR $XLA_DIR $USE_COVERAGE elif [[ "$RUN_BENCHMARK_TESTS" == "benchmark_tests" ]]; then run_torch_xla_benchmark_tests $XLA_DIR diff --git a/test/run_tests.sh b/test/run_tests.sh index 840f2e25d97..8ddc0494647 100755 --- a/test/run_tests.sh +++ b/test/run_tests.sh @@ -240,6 +240,42 @@ function run_xla_op_tests3 { # NOTE: this line below is testing export and don't care about GPU PJRT_DEVICE=CPU CPU_NUM_DEVICES=1 run_coverage "$CDIR/test_core_aten_ops.py" run_test "$CDIR/test_pallas.py" + + # CUDA tests + if [ -x "$(command -v nvidia-smi)" ]; then + # single-host-single-process + PJRT_DEVICE=CUDA python3 test/test_train_mp_imagenet.py --fake_data --batch_size=16 --num_epochs=1 --num_cores=1 --num_steps=25 --model=resnet18 + PJRT_DEVICE=CUDA torchrun --nnodes=1 --node_rank=0 --nproc_per_node=1 test/test_train_mp_imagenet.py --fake_data --pjrt_distributed --batch_size=16 --num_epochs=1 --num_steps=25 --model=resnet18 + + # single-host-multi-process + num_devices=$(nvidia-smi --list-gpus | wc -l) + PJRT_DEVICE=CUDA GPU_NUM_DEVICES=$GPU_NUM_DEVICES python3 test/test_train_mp_imagenet.py --fake_data --batch_size=16 --num_epochs=1 --num_steps=25 --model=resnet18 + PJRT_DEVICE=CUDA torchrun --nnodes=1 --node_rank=0 --nproc_per_node=$num_devices test/test_train_mp_imagenet.py --fake_data --pjrt_distributed --batch_size=16 --num_epochs=1 --num_steps=25 --model=resnet18 + + # single-host-SPMD + # TODO: Reduce BS due to GPU test OOM in CI after pin update to 03/05/2024 (#6677) + XLA_USE_SPMD=1 PJRT_DEVICE=CUDA torchrun --nnodes=1 --node_rank=0 --nproc_per_node=1 test/spmd/test_train_spmd_imagenet.py --fake_data --batch_size 8 --model=resnet50 --sharding=batch --num_epochs=1 --num_steps=25 --model=resnet18 + + # TODO: Reduce BS due to GPU test OOM in CI after pin update to 03/05/2024 (#6677) + PJRT_DEVICE=CUDA python test/test_train_mp_imagenet_fsdp.py --fake_data --use_nested_fsdp --use_small_fake_sample --num_epochs=1 --batch_size 32 --test_set_batch_size 32 + # TODO: Reduce BS due to GPU test OOM in CI after pin update to 03/05/2024 (#6677) + PJRT_DEVICE=CUDA python test/test_train_mp_imagenet_fsdp.py --fake_data --auto_wrap_policy type_based --use_small_fake_sample --num_epochs=1 --batch_size 32 --test_set_batch_size 32 + XLA_DISABLE_FUNCTIONALIZATION=1 PJRT_DEVICE=CUDA python test/test_train_mp_imagenet_fsdp.py --fake_data --use_nested_fsdp --use_small_fake_sample --num_epochs=1 + # Syncfree SGD optimizer tests + if [ -d ./torch_xla/amp/syncfree ]; then + echo "Running Syncfree Optimizer Test" + PJRT_DEVICE=CUDA python test/test_syncfree_optimizers.py + + # Following test scripts are mainly useful for + # performance evaluation & comparison among different + # amp optimizers. + echo "Running ImageNet Test" + python test/test_train_mp_imagenet_amp.py --fake_data --num_epochs=1 + + echo "Running MNIST Test" + python test/test_train_mp_mnist_amp.py --fake_data --num_epochs=1 + fi + fi } #######################################################################################