From 9d9c991c7dbeb06e74d03146f79066a2ea1eeb2b Mon Sep 17 00:00:00 2001
From: Mike Walmsley
Date: Mon, 20 Nov 2023 10:59:00 -0500
Subject: [PATCH] prepare to update dependencies

---
 README.md                            | 10 +++---
 benchmarks/pytorch/run_benchmarks.sh | 17 ++++-----
 setup.py                             | 54 ++++++++++++++++------------
 3 files changed, 46 insertions(+), 35 deletions(-)

diff --git a/README.md b/README.md
index 8bcdc1f5..93430ad8 100755
--- a/README.md
+++ b/README.md
@@ -32,16 +32,16 @@ Download the code using git:

 And then pick one of the three commands below to install Zoobot and either PyTorch (recommended) or TensorFlow:

-    # Zoobot with PyTorch and a GPU. Requires CUDA 11.3.
-    pip install -e "zoobot[pytorch_cu113]" --extra-index-url https://download.pytorch.org/whl/cu113
+    # Zoobot with PyTorch and a GPU. Requires CUDA 12.1 (or CUDA 11.8, if you use `-cu118` instead)
+    pip install -e "zoobot[pytorch-cu121]" --extra-index-url https://download.pytorch.org/whl/cu121

     # OR Zoobot with PyTorch and no GPU
-    pip install -e "zoobot[pytorch_cpu]" --extra-index-url https://download.pytorch.org/whl/cpu
+    pip install -e "zoobot[pytorch-cpu]" --extra-index-url https://download.pytorch.org/whl/cpu

     # OR Zoobot with PyTorch on Mac with M1 chip
-    pip install -e "zoobot[pytorch_m1]"
+    pip install -e "zoobot[pytorch-m1]"

-    # OR Zoobot with TensorFlow. Works with and without a GPU, but if you have a GPU, you need CUDA 11.2. 
+    # OR Zoobot with TensorFlow. Works with and without a GPU, but if you have a GPU, you need CUDA 11.2.
     pip install -e "zoobot[tensorflow]"

 This installs the downloaded Zoobot code using pip [editable mode](https://pip.pypa.io/en/stable/topics/local-project-installs/#editable-installs) so you can easily change the code locally. Zoobot is also available directly from pip (`pip install zoobot[option]`). Only use this if you are sure you won't be making changes to Zoobot itself. For Google Colab, use `pip install zoobot[pytorch_colab]`
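Note (not part of the patch): after installing with the renamed `pytorch-cu121` extra, a quick way to confirm the wheels really came from the cu121 index is the short Python check below. The expected version strings are assumptions taken from the pins in the setup.py hunk later in this patch.

    import torch
    import torchvision

    print(torch.__version__)          # expect '2.1.0+cu121' with the pytorch-cu121 extra
    print(torchvision.__version__)    # expect '0.16.0+cu121'
    print(torch.version.cuda)         # expect '12.1'
    print(torch.cuda.is_available())  # True only with a compatible NVIDIA driver installed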
diff --git a/benchmarks/pytorch/run_benchmarks.sh b/benchmarks/pytorch/run_benchmarks.sh
index 07094601..3ff5e946 100755
--- a/benchmarks/pytorch/run_benchmarks.sh
+++ b/benchmarks/pytorch/run_benchmarks.sh
@@ -13,11 +13,11 @@ SEED=$RANDOM

 # GZ Evo i.e. all galaxies
-# effnet, greyscale and color
-# sbatch --job-name=evo_py_gr_eff_224_$SEED --export=ARCHITECTURE=efficientnet_b0,BATCH_SIZE=256,RESIZE_AFTER_CROP=224,DATASET=gz_evo,MIXED_PRECISION_STRING=--mixed-precision,GPUS=2,SEED=$SEED $TRAIN_JOB
-# sbatch --job-name=evo_py_gr_eff_300_$SEED --export=ARCHITECTURE=efficientnet_b0,BATCH_SIZE=256,RESIZE_AFTER_CROP=300,DATASET=gz_evo,MIXED_PRECISION_STRING=--mixed-precision,GPUS=2,SEED=$SEED $TRAIN_JOB
-# sbatch --job-name=evo_py_co_eff_224_$SEED --export=ARCHITECTURE=efficientnet_b0,BATCH_SIZE=256,RESIZE_AFTER_CROP=224,DATASET=gz_evo,COLOR_STRING=--color,GPUS=2,SEED=$SEED $TRAIN_JOB
-# sbatch --job-name=evo_py_co_eff_300_$SEED --export=ARCHITECTURE=efficientnet_b0,BATCH_SIZE=128,RESIZE_AFTER_CROP=300,DATASET=gz_evo,COLOR_STRING=--color,GPUS=2,SEED=$SEED $TRAIN_JOB
+# effnet, greyscale and color, 224 and 300px
+sbatch --job-name=evo_py_gr_eff_224_$SEED --export=ARCHITECTURE=efficientnet_b0,BATCH_SIZE=256,RESIZE_AFTER_CROP=224,DATASET=gz_evo,MIXED_PRECISION_STRING=--mixed-precision,GPUS=2,SEED=$SEED $TRAIN_JOB
+sbatch --job-name=evo_py_gr_eff_300_$SEED --export=ARCHITECTURE=efficientnet_b0,BATCH_SIZE=256,RESIZE_AFTER_CROP=300,DATASET=gz_evo,MIXED_PRECISION_STRING=--mixed-precision,GPUS=2,SEED=$SEED $TRAIN_JOB
+sbatch --job-name=evo_py_co_eff_224_$SEED --export=ARCHITECTURE=efficientnet_b0,BATCH_SIZE=256,RESIZE_AFTER_CROP=224,DATASET=gz_evo,COLOR_STRING=--color,GPUS=2,SEED=$SEED $TRAIN_JOB
+sbatch --job-name=evo_py_co_eff_300_$SEED --export=ARCHITECTURE=efficientnet_b0,BATCH_SIZE=128,RESIZE_AFTER_CROP=300,DATASET=gz_evo,COLOR_STRING=--color,GPUS=2,SEED=$SEED $TRAIN_JOB

 # and resnet18
 # sbatch --job-name=evo_py_gr_res18_224_$SEED --export=ARCHITECTURE=resnet18,BATCH_SIZE=256,RESIZE_AFTER_CROP=224,DATASET=gz_evo,MIXED_PRECISION_STRING=--mixed-precision,GPUS=2,SEED=$SEED $TRAIN_JOB
@@ -26,7 +26,7 @@ SEED=$RANDOM
 # sbatch --job-name=evo_py_gr_res50_224_$SEED --export=ARCHITECTURE=resnet50,BATCH_SIZE=256,RESIZE_AFTER_CROP=224,DATASET=gz_evo,MIXED_PRECISION_STRING=--mixed-precision,GPUS=2,SEED=$SEED $TRAIN_JOB
 # sbatch --job-name=evo_py_gr_res50_300_$SEED --export=ARCHITECTURE=resnet50,BATCH_SIZE=256,RESIZE_AFTER_CROP=300,DATASET=gz_evo,MIXED_PRECISION_STRING=--mixed-precision,GPUS=2,SEED=$SEED $TRAIN_JOB
 # color 224 version
-sbatch --job-name=evo_py_co_res50_224_$SEED --export=ARCHITECTURE=resnet50,BATCH_SIZE=256,RESIZE_AFTER_CROP=224,DATASET=gz_evo,COLOR_STRING=--color,MIXED_PRECISION_STRING=--mixed-precision,GPUS=2,SEED=$SEED $TRAIN_JOB
+# sbatch --job-name=evo_py_co_res50_224_$SEED --export=ARCHITECTURE=resnet50,BATCH_SIZE=256,RESIZE_AFTER_CROP=224,DATASET=gz_evo,COLOR_STRING=--color,MIXED_PRECISION_STRING=--mixed-precision,GPUS=2,SEED=$SEED $TRAIN_JOB

 # and with max-vit tiny because hey transformers are cool
 # smaller batch size due to memory
@@ -35,11 +35,12 @@ sbatch --job-name=evo_py_co_res50_224_$SEED --export=ARCHITECTURE=resnet50,BATCH
 # and max-vit small (works badly)
 # sbatch --job-name=evo_py_gr_vitsmall_224_$SEED --export=ARCHITECTURE=maxvit_small_224,BATCH_SIZE=64,RESIZE_AFTER_CROP=224,DATASET=gz_evo,MIXED_PRECISION_STRING=--mixed-precision,GPUS=2,SEED=$SEED $TRAIN_JOB
-# and convnext (works badly)
+# and convnext (works badly, would really like to try again but bigger)
 # sbatch --job-name=evo_py_gr_$SEED --export=ARCHITECTURE=convnext_nano,BATCH_SIZE=256,RESIZE_AFTER_CROP=224,DATASET=gz_evo,MIXED_PRECISION_STRING=--mixed-precision,GPUS=2,SEED=$SEED $TRAIN_JOB
 # and vit
 # sbatch --job-name=evo_py_gr_vittinyp16_224_$SEED --export=ARCHITECTURE=vit_tiny_patch16_224,BATCH_SIZE=128,RESIZE_AFTER_CROP=224,DATASET=gz_evo,MIXED_PRECISION_STRING=--mixed-precision,GPUS=2,SEED=$SEED $TRAIN_JOB
-
+# and swinv2
+# TODO

 # and in color with no mixed precision, for specific project
 # sbatch --job-name=evo_py_co_res50_224_fullprec_$SEED --export=ARCHITECTURE=resnet50,BATCH_SIZE=256,RESIZE_AFTER_CROP=224,DATASET=gz_evo,COLOR_STRING=--color,GPUS=2,SEED=$SEED $TRAIN_JOB
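Note (not part of the patch): every job above passes its hyperparameters to SLURM as a comma-separated KEY=VALUE string via --export. The Python helper below is hypothetical (build_export does not exist in the repo) and only illustrates how those strings are assembled, e.g. when filling in the swinv2 jobs marked TODO.

    def build_export(architecture, batch_size, resize_after_crop, seed,
                     dataset='gz_evo', color=False, mixed_precision=True, gpus=2):
        # mirror the KEY=VALUE pairs used by the sbatch lines above
        params = {
            'ARCHITECTURE': architecture,
            'BATCH_SIZE': batch_size,
            'RESIZE_AFTER_CROP': resize_after_crop,
            'DATASET': dataset,
        }
        if color:
            params['COLOR_STRING'] = '--color'
        if mixed_precision:
            params['MIXED_PRECISION_STRING'] = '--mixed-precision'
        params['GPUS'] = gpus
        params['SEED'] = seed
        return ','.join(f'{key}={value}' for key, value in params.items())

    # e.g. the greyscale efficientnet_b0 job at 224px prints:
    # ARCHITECTURE=efficientnet_b0,BATCH_SIZE=256,RESIZE_AFTER_CROP=224,DATASET=gz_evo,MIXED_PRECISION_STRING=--mixed-precision,GPUS=2,SEED=42
    print(build_export('efficientnet_b0', 256, 224, seed=42))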
diff --git a/setup.py b/setup.py
index 0da18e1b..712233e5 100755
--- a/setup.py
+++ b/setup.py
@@ -5,7 +5,7 @@
 setuptools.setup(
     name="zoobot",
-    version="1.0.5",
+    version="1.0.6",
     author="Mike Walmsley",
     author_email="walmsleymk1@gmail.com",
     description="Galaxy morphology classifiers",
@@ -22,51 +22,61 @@
     packages=setuptools.find_packages(),
     python_requires=">=3.8",  # recommend 3.9 for new users. TF needs >=3.7.2, torchvision>=3.8
     extras_require={
-        'pytorch_cpu': [
+        'pytorch-cpu': [
             # A100 GPU currently only seems to support cuda 11.3 on manchester cluster, let's stick with this version for now
             # very latest version wants cuda 11.6
-            'torch == 1.12.1+cpu',
-            'torchvision == 0.13.1+cpu',
-            'torchaudio == 0.12.1',
+            'torch == 2.1.0+cpu',
+            'torchvision == 0.16.0+cpu',
+            'torchaudio >= 2.1.0',
             'pytorch-lightning >= 2.0.0',
             # 'simplejpeg',
             'albumentations',
-            'pyro-ppl == 1.8.0',
+            'pyro-ppl >= 1.8.6',
             'torchmetrics == 0.11.0',
-            'timm == 0.6.12'
+            'timm == 0.9.10'
         ],
-        'pytorch_m1': [
+        'pytorch-m1': [
             # as above but without the +cpu (and the extra-index-url in readme has no effect)
             # all matching pytorch versions for an m1 system will be cpu
-            'torch == 1.12.1',
-            'torchvision == 0.13.1',
-            'torchaudio == 0.12.1',
+            'torch == 2.1.0',
+            'torchvision == 0.16.0',
+            'torchaudio >= 2.1.0',
             'pytorch-lightning >= 2.0.0',
             'albumentations',
-            'pyro-ppl == 1.8.0',
+            'pyro-ppl >= 1.8.6',
             'torchmetrics == 0.11.0',
-            'timm == 0.6.12'
+            'timm >= 0.9.10'
         ],
         # as above but without pytorch itself
         # for GPU, you will also need e.g. cudatoolkit=11.3, 11.6
         # https://pytorch.org/get-started/previous-versions/#v1121
-        'pytorch_cu113': [
-            'torch == 1.12.1+cu113',
-            'torchvision == 0.13.1+cu113',
-            'torchaudio == 0.12.1',
+        'pytorch-cu118': [
+            'torch == 2.1.0+cu118',
+            'torchvision == 0.16.0+cu118',
+            'torchaudio >= 2.1.0',
             'pytorch-lightning >= 2.0.0',
             'albumentations',
-            'pyro-ppl == 1.8.0',
+            'pyro-ppl >= 1.8.6',
             'torchmetrics == 0.11.0',
-            'timm == 0.6.12'
-        ],
-        'pytorch_colab': [
+            'timm >= 0.9.10'
+        ],  # exactly as above, but _cu121 for cuda 12.1 (the current default)
+        'pytorch-cu121': [
+            'torch == 2.1.0+cu121',
+            'torchvision == 0.16.0+cu121',
+            'torchaudio >= 2.1.0',
+            'pytorch-lightning >= 2.0.0',
+            'albumentations',
+            'pyro-ppl >= 1.8.6',
+            'torchmetrics == 0.11.0',
+            'timm >= 0.9.10'
+        ],
+        'pytorch-colab': [
             # colab includes pytorch already
             'pytorch-lightning >= 2.0.0',
             'albumentations',
             'pyro-ppl>=1.8.0',
             'torchmetrics==0.11.0',
-            'timm == 0.6.12'
+            'timm == 0.9.10'
         ],
         # TODO may add narval/Digital Research Canada config
         'tensorflow': [
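Note (not part of the patch): the four PyTorch extras above repeat the same dependency list, differing only in the local version suffix on torch and torchvision. The sketch below is an illustrative refactor only, not what the patch does; the pins are copied from the hunk above, and pytorch_extra is a hypothetical helper.

    # shared across every PyTorch extra
    common = [
        'pytorch-lightning >= 2.0.0',
        'albumentations',
        'pyro-ppl >= 1.8.6',
        'torchmetrics == 0.11.0',
        'timm >= 0.9.10',
    ]

    def pytorch_extra(suffix=''):
        # suffix is '+cpu', '+cu118' or '+cu121'; '' gives the default wheels (e.g. for m1)
        return [
            f'torch == 2.1.0{suffix}',
            f'torchvision == 0.16.0{suffix}',
            'torchaudio >= 2.1.0',
        ] + common

    extras_require = {
        'pytorch-cpu': pytorch_extra('+cpu'),
        'pytorch-m1': pytorch_extra(),
        'pytorch-cu118': pytorch_extra('+cu118'),
        'pytorch-cu121': pytorch_extra('+cu121'),
        'pytorch-colab': common,  # colab already ships pytorch itself
    }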