# Workflow captured from PR #549 "Common dataset class" (GitHub UI paste residue removed).

# This workflow will install Python dependencies, run tests and lint with a single version of Python
# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python
---
name: Tests

on:
  push:
    branches: ["main"]
  pull_request:
    branches: ["main"]

# Least-privilege token: this workflow only needs to read repository contents.
permissions:
  contents: read

jobs:
  tests:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      # env.yaml is the single source of truth for the interpreter version:
      # extract X.Y.Z from a "python=X.Y.Z" line and expose it as a step output.
      - name: Get python version from env.yaml
        id: python_version
        run: echo PYTHON_VERS=$(grep python= env.yaml | sed 's/.*python=\([0-9]\.[0-9]*\.[0-9]*\).*/\1/') >> $GITHUB_OUTPUT

      - name: Set up Python ${{ steps.python_version.outputs.PYTHON_VERS }}
        # v3 runs on the deprecated node16 runtime; v5 accepts the same inputs
        # and matches the checkout@v4 generation used above.
        uses: actions/setup-python@v5
        with:
          # Quoted so a version like 3.10 is never parsed as the float 3.1.
          python-version: "${{ steps.python_version.outputs.PYTHON_VERS }}"

      # Likewise pin torch to the exact "torch==X.Y.Z" version declared in env.yaml.
      - name: Get pytorch version from env.yaml
        id: pytorch_version
        run: echo PYTORCH_VERS=$(grep torch== env.yaml | sed 's/.*torch==\(.*\)/\1/') >> $GITHUB_OUTPUT

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt \
            torch==${{ steps.pytorch_version.outputs.PYTORCH_VERS }}
          pip install -r requirements_lint.txt
          pip install --editable .

      - name: Lint
        run: |
          ./lint.sh .

      # Unit tests plus minimal smoke train/inference runs (1 epoch, 1 batch),
      # all under coverage with parallel data files (-p); the job fails if the
      # combined coverage drops below 60%.
      - name: Integration Test with pytest
        run: |
          export PY4CAST_ROOTDIR=`pwd`
          coverage run -p -m pytest tests/
          coverage run -p bin/train.py --precision 32 --model HalfUNet --model_conf config/models/halfunet32.json --dataset dummy --epochs 1 --batch_size 1 --num_pred_steps_train 1 --limit_train_batches 1 --num_workers 1
          coverage run -p bin/train.py --precision 32 --model HalfUNet --model_conf config/models/halfunet32.json --dataset dummy --epochs 1 --batch_size 1 --num_pred_steps_train 1 --limit_train_batches 1 --num_workers 1 --strategy scaled_ar
          coverage run -p bin/inference.py --dataset dummy --model_path /home/runner/work/py4cast/py4cast/logs/camp0/dummy/HalfUNet/runn_run_0/
          coverage run -p bin/train.py --precision 32 --model HiLAM --dataset dummy --epochs 1 --batch_size 1 --num_pred_steps_train 1 --limit_train_batches 1 --num_workers 1 --pin_memory
          coverage run -p bin/train.py --precision 32 --model UNETRPP --dataset dummy --epochs 1 --batch_size 1 --num_pred_steps_train 1 --limit_train_batches 1 --num_workers 1
          coverage combine
          coverage report --ignore-errors --fail-under=60