diff --git a/.github/workflows/pr-all-test-ubuntu-setup.yml b/.github/workflows/pr-all-test-ubuntu-setup.yml
index d239f37ed..d0f76fd91 100644
--- a/.github/workflows/pr-all-test-ubuntu-setup.yml
+++ b/.github/workflows/pr-all-test-ubuntu-setup.yml
@@ -48,6 +48,8 @@ jobs:
- name: Run setup-dev-no-docker
# only run if core, local, or models files were changed
if: steps.filter.outputs.core == 'true'
+ env:
+ BUILD_DEBUG: 1
run: |
make setup-dev-no-docker
- name: login to comps2
@@ -105,6 +107,8 @@ jobs:
if: steps.filter.outputs.core == 'true' || steps.filter.outputs.cli == 'true'
+ env:
+ BUILD_DEBUG: 1
run: |
make setup-dev-no-docker
- name: login to comps2
if: steps.filter.outputs.core == 'true' || steps.filter.outputs.cli == 'true'
run: |
@@ -164,6 +168,8 @@ jobs:
pip install idm-buildtools flake8 wheel --index-url=https://packages.idmod.org/api/pypi/pypi-production/simple
- name: Run setup-dev-no-docker
if: steps.filter.outputs.core == 'true' || steps.filter.outputs.models == 'true'
+ env:
+ BUILD_DEBUG: 1
run: |
make setup-dev-no-docker
- name: login to comps2
@@ -225,6 +231,8 @@ jobs:
pip install idm-buildtools flake8 wheel --index-url=https://packages.idmod.org/api/pypi/pypi-production/simple
- name: Run setup-dev-no-docker
if: steps.filter.outputs.core == 'true' || steps.filter.outputs.comps == 'true' || steps.filter.outputs.models == 'true'
+ env:
+ BUILD_DEBUG: 1
run: |
make setup-dev-no-docker
- name: login to comps2
@@ -268,20 +276,20 @@ jobs:
local:
- 'idmtools_platform_local/**'
- uses: actions/cache@v2
- if: steps.filter.outputs.core == 'true' || steps.filter.outputs.comps == 'true' || steps.filter.outputs.models == 'true'
+ if: steps.filter.outputs.core == 'true' || steps.filter.outputs.local == 'true' || steps.filter.outputs.models == 'true'
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}-${{ hashFiles('**/*_requirements.txt') }}
restore-keys: |
${{ runner.os }}-pip-
- name: ${{ matrix.os }} Python ${{ matrix.python-version }}
- if: steps.filter.outputs.core == 'true' || steps.filter.outputs.comps == 'true' || steps.filter.outputs.models == 'true'
+ if: steps.filter.outputs.core == 'true' || steps.filter.outputs.local == 'true' || steps.filter.outputs.models == 'true'
uses: actions/setup-python@v2.1.4
with:
python-version: ${{ matrix.python-version }}
architecture: x64
- name: Install Python dependencies
- if: steps.filter.outputs.core == 'true' || steps.filter.outputs.comps == 'true' || steps.filter.outputs.models == 'true'
+ if: steps.filter.outputs.core == 'true' || steps.filter.outputs.local == 'true' || steps.filter.outputs.models == 'true'
run: |
pip install idm-buildtools flake8 wheel --index-url=https://packages.idmod.org/api/pypi/pypi-production/simple
- name: Run setup-dev
@@ -290,7 +298,7 @@ jobs:
run: |
make setup-dev
- name: login to comps2
- if: steps.filter.outputs.core == 'true' || steps.filter.outputs.comps == 'true' || steps.filter.outputs.models == 'true'
+ if: steps.filter.outputs.core == 'true' || steps.filter.outputs.local == 'true' || steps.filter.outputs.models == 'true'
run: |
python dev_scripts/create_auth_token_args.py --username $COMPS_USER --password $COMPS_PASSWORD
- name: run idmtools_platform_local all tests
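
# Note on the BUILD_DEBUG hunks above: a step-level `env:` block applies only to
# that step, and YAML key order within a step is not significant, though declaring
# `env:` before `run:` (as these hunks do) keeps it visually separate from the run
# script. A minimal sketch of the intended step shape, assuming BUILD_DEBUG is
# consumed by the make targets:
```yaml
- name: Run setup-dev-no-docker
  if: steps.filter.outputs.core == 'true'
  env:
    BUILD_DEBUG: 1        # assumption: the Makefile reads this to emit verbose build output
  run: |
    make setup-dev-no-docker
```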
diff --git a/.github/workflows/run-master-prod-test-curl.yml b/.github/workflows/run-all-test-ubuntu-prod-master-curl.yml
similarity index 74%
rename from .github/workflows/run-master-prod-test-curl.yml
rename to .github/workflows/run-all-test-ubuntu-prod-master-curl.yml
index c2e21f1a0..9b4810a50 100644
--- a/.github/workflows/run-master-prod-test-curl.yml
+++ b/.github/workflows/run-all-test-ubuntu-prod-master-curl.yml
@@ -8,7 +8,7 @@
#
# Note, this test will checkout 'master' branch
-name: "run-master-prod-test-curl"
+name: "run-all-test-ubuntu-prod-master-curl"
on:
repository_dispatch:
# Manually trigger with above curl or Postman POST
@@ -41,6 +41,7 @@ jobs:
run: |
pip install idmtools[full] --index-url=https://packages.idmod.org/api/pypi/pypi-production/simple
pip install idmtools-test --index-url=https://packages.idmod.org/api/pypi/pypi-production/simple
+ pip install idmtools[test] --index-url=https://packages.idmod.org/api/pypi/pypi-production/simple
- name: login to comps2
run: |
python dev_scripts/create_auth_token_args.py --username $COMPS_USER --password $COMPS_PASSWORD
@@ -49,51 +50,51 @@ jobs:
COMPS_PASSWORD: ${{ secrets.COMPS_PASSWORD }}
- name: run idmtools_cli tests
run: |
- cd idmtools_cli/tests
- py.test --durations=3 -v --junitxml=test_results.xml
- - name: Upload idmtools_cli test results
+ cd idmtools_cli
+ PARALLEL_TEST_COUNT=2 make test-all
+ - name: Upload idmtools_cli all test results
uses: actions/upload-artifact@v2
if: failure()
with:
name: idmtools_cli_test_results
- path: idmtools_cli/tests/test_results.xml
+ path: idmtools_cli/tests/reports/
- name: run idmtools_core tests
run: |
- cd idmtools_core/tests
- py.test --durations=3 -v --junitxml=test_results.xml
- - name: Upload idmtools_core test results
+ cd idmtools_core
+ PARALLEL_TEST_COUNT=2 make test-all
+ - name: Upload idmtools_core all test results
uses: actions/upload-artifact@v2
if: failure()
with:
name: idmtools_core_test_results
- path: idmtools_core/tests/test_results.xml
+ path: idmtools_core/tests/reports/
- name: run idmtools_models tests
run: |
- cd idmtools_models/tests
- py.test --durations=3 -v --junitxml=test_results.xml
- - name: Upload idmtools_models test results
+ cd idmtools_models
+ PARALLEL_TEST_COUNT=2 make test-all
+ - name: Upload idmtools_models all test results
uses: actions/upload-artifact@v2
if: failure()
with:
name: idmtools_models_test_results
- path: idmtools_models/tests/test_results.xml
+ path: idmtools_models/tests/results/
- name: run idmtools_platform_comps tests
run: |
- cd idmtools_platform_comps/tests
- py.test --durations=3 -v --junitxml=test_results.xml
+ cd idmtools_platform_comps
+ PARALLEL_TEST_COUNT=2 make test-all
- name: Upload idmtools_platform_comps test results
uses: actions/upload-artifact@v2
if: failure()
with:
name: idmtools_platform_comps_test_results
- path: idmtools_platform_comps/tests/test_results.xml
+ path: idmtools_platform_comps/tests/results/
- name: run idmtools_platform_local tests
run: |
- cd idmtools_platform_local/tests
- py.test --durations=3 -v --junitxml=test_results.xml
- - name: Upload idmtools_platform_local test results
+ cd idmtools_platform_local
+ make test-all
+ - name: Upload idmtools_platform_local all test results
uses: actions/upload-artifact@v2
if: failure()
with:
name: idmtools_platform_local_test_results
- path: idmtools_platform_local/tests/test_results.xml
\ No newline at end of file
+ path: idmtools_platform_local/tests/results/
\ No newline at end of file
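
# This renamed workflow is still driven by repository_dispatch, so its trigger
# block must name the event type that the curl/Postman POST sends. The `types:`
# value for this file sits outside the hunk, so the sketch below uses a
# placeholder rather than the real value:
```yaml
on:
  repository_dispatch:
    # <dispatch-type> is a placeholder; it must match the "event_type" field
    # in the POST body sent to .../idmtools/dispatches.
    types: [<dispatch-type>]
```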
diff --git a/.github/workflows/run-all-test-ubuntu-setup-curl.yml b/.github/workflows/run-all-test-ubuntu-setup-curl.yml
index a82d89236..9488431d8 100644
--- a/.github/workflows/run-all-test-ubuntu-setup-curl.yml
+++ b/.github/workflows/run-all-test-ubuntu-setup-curl.yml
@@ -20,7 +20,7 @@ jobs:
strategy:
matrix:
os: [ubuntu-latest]
- python-version: [3.6, 3.7, 3.8]
+ python-version: [3.8]
steps:
- name: Check out Git repository
uses: actions/checkout@v2
@@ -48,51 +48,51 @@ jobs:
COMPS_PASSWORD: ${{ secrets.COMPS_PASSWORD }}
- name: run idmtools_cli tests
run: |
- cd idmtools_cli/tests
- py.test --durations=3 -v --junitxml=test_results.xml
- - name: Upload idmtools_cli test results
+ cd idmtools_cli
+ PARALLEL_TEST_COUNT=2 make test-all
+ - name: Upload idmtools_cli all test results
uses: actions/upload-artifact@v2
if: failure()
with:
name: idmtools_cli_test_results
- path: idmtools_cli/tests/test_results.xml
+ path: idmtools_cli/tests/reports/
- name: run idmtools_core tests
run: |
- cd idmtools_core/tests
- py.test --durations=3 -v --junitxml=test_results.xml
- - name: Upload idmtools_core test results
+ cd idmtools_core
+ PARALLEL_TEST_COUNT=2 make test-all
+ - name: Upload idmtools_core all test results
uses: actions/upload-artifact@v2
if: failure()
with:
name: idmtools_core_test_results
- path: idmtools_core/tests/test_results.xml
+ path: idmtools_core/tests/reports/
- name: run idmtools_models tests
run: |
- cd idmtools_models/tests
- py.test --durations=3 -v --junitxml=test_results.xml
- - name: Upload idmtools_models test results
+ cd idmtools_models
+ PARALLEL_TEST_COUNT=2 make test-all
+ - name: Upload idmtools_models all test results
uses: actions/upload-artifact@v2
if: failure()
with:
name: idmtools_models_test_results
- path: idmtools_models/tests/test_results.xml
+ path: idmtools_models/tests/results/
- name: run idmtools_platform_comps tests
run: |
- cd idmtools_platform_comps/tests
- py.test --durations=3 -v --junitxml=test_results.xml
+ cd idmtools_platform_comps
+ PARALLEL_TEST_COUNT=2 make test-all
- name: Upload idmtools_platform_comps test results
uses: actions/upload-artifact@v2
if: failure()
with:
name: idmtools_platform_comps_test_results
- path: idmtools_platform_comps/tests/test_results.xml
+ path: idmtools_platform_comps/tests/results/
- name: run idmtools_platform_local tests
run: |
- cd idmtools_platform_local/tests
- py.test --durations=3 -v --junitxml=test_results.xml
- - name: Upload idmtools_platform_local test results
+ cd idmtools_platform_local
+ make test-all
+ - name: Upload idmtools_platform_local all test results
uses: actions/upload-artifact@v2
if: failure()
with:
name: idmtools_platform_local_test_results
- path: idmtools_platform_local/tests/test_results.xml
\ No newline at end of file
+ path: idmtools_platform_local/tests/results/
\ No newline at end of file
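
# The recurring `PARALLEL_TEST_COUNT=2 make test-all` pattern sets the variable
# for that one command rather than exporting it; presumably the package Makefiles
# use it to cap concurrent test workers on the 2-core hosted runners. A sketch of
# one such step, with that assumption spelled out:
```yaml
- name: run idmtools_core tests
  run: |
    cd idmtools_core
    # Assumption: the Makefile's test-all target forwards PARALLEL_TEST_COUNT
    # to the test runner (e.g. as a pytest-xdist worker count).
    PARALLEL_TEST_COUNT=2 make test-all
```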
diff --git a/.github/workflows/run-all-test-ubuntu-stage-curl.yml b/.github/workflows/run-all-test-ubuntu-stage-dev-curl.yml
similarity index 76%
rename from .github/workflows/run-all-test-ubuntu-stage-curl.yml
rename to .github/workflows/run-all-test-ubuntu-stage-dev-curl.yml
index 092adbf2c..76da8a454 100644
--- a/.github/workflows/run-all-test-ubuntu-stage-curl.yml
+++ b/.github/workflows/run-all-test-ubuntu-stage-dev-curl.yml
@@ -8,7 +8,7 @@
#
# Note, this test will checkout 'dev' branch
-name: "run-all-test-ubuntu-stage-curl"
+name: "run-all-test-ubuntu-stage-dev-curl"
on:
repository_dispatch:
@@ -48,6 +48,7 @@ jobs:
run: |
pip install idmtools[full] --index-url=https://${{ env.bamboo_user }}:${{ env.bamboo_password }}@packages.idmod.org/api/pypi/pypi-staging/simple
pip install idmtools-test --index-url=https://${{ env.bamboo_user }}:${{ env.bamboo_password }}@packages.idmod.org/api/pypi/pypi-staging/simple
+ pip install idmtools[test] --index-url=https://${{ env.bamboo_user }}:${{ env.bamboo_password }}@packages.idmod.org/api/pypi/pypi-staging/simple
- name: login to comps2
run: |
python dev_scripts/create_auth_token_args.py --username $COMPS_USER --password $COMPS_PASSWORD
@@ -59,52 +60,51 @@ jobs:
echo '${{ env.bamboo_password }}' | docker login idm-docker-staging.packages.idmod.org --username ${{ env.bamboo_user }} --password-stdin
- name: run idmtools_cli tests
run: |
- cd idmtools_cli/tests
- py.test --durations=3 -v --junitxml=test_results.xml
- - name: Upload idmtools_cli test results
+ cd idmtools_cli
+ PARALLEL_TEST_COUNT=2 make test-all
+ - name: Upload idmtools_cli all test results
uses: actions/upload-artifact@v2
if: failure()
with:
name: idmtools_cli_test_results
- path: idmtools_cli/tests/test_results.xml
+ path: idmtools_cli/tests/reports/
- name: run idmtools_core tests
run: |
- cd idmtools_core/tests
- py.test --durations=3 -v --junitxml=test_results.xml
- - name: Upload idmtools_core test results
+ cd idmtools_core
+ PARALLEL_TEST_COUNT=2 make test-all
+ - name: Upload idmtools_core all test results
uses: actions/upload-artifact@v2
if: failure()
with:
name: idmtools_core_test_results
- path: idmtools_core/tests/test_results.xml
+ path: idmtools_core/tests/reports/
- name: run idmtools_models tests
run: |
- cd idmtools_models/tests
- py.test --durations=3 -v --junitxml=test_results.xml
- - name: Upload idmtools_models test results
+ cd idmtools_models
+ PARALLEL_TEST_COUNT=2 make test-all
+ - name: Upload idmtools_models all test results
uses: actions/upload-artifact@v2
if: failure()
with:
name: idmtools_models_test_results
- path: idmtools_models/tests/test_results.xml
+ path: idmtools_models/tests/results/
- name: run idmtools_platform_comps tests
run: |
- cd idmtools_platform_comps/tests
- py.test --durations=3 -v --junitxml=test_results.xml
+ cd idmtools_platform_comps
+ PARALLEL_TEST_COUNT=2 make test-all
- name: Upload idmtools_platform_comps test results
uses: actions/upload-artifact@v2
if: failure()
with:
name: idmtools_platform_comps_test_results
- path: idmtools_platform_comps/tests/test_results.xml
+ path: idmtools_platform_comps/tests/results/
- name: run idmtools_platform_local tests
run: |
- cd idmtools_platform_local/tests
- export DOCKER_REPO=idm-docker-staging
- py.test --durations=3 -v --junitxml=test_results.xml
- - name: Upload idmtools_platform_local test results
+ cd idmtools_platform_local
+ make test-all
+ - name: Upload idmtools_platform_local all test results
uses: actions/upload-artifact@v2
if: failure()
with:
name: idmtools_platform_local_test_results
- path: idmtools_platform_local/tests/test_results.xml
\ No newline at end of file
+ path: idmtools_platform_local/tests/results/
\ No newline at end of file
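
# The artifact paths above change from a single test_results.xml file to a
# directory (tests/reports/ or tests/results/, depending on the package).
# actions/upload-artifact@v2 accepts a directory and archives everything under
# it, which suits the multiple report files the make targets produce:
```yaml
- name: Upload idmtools_core all test results
  uses: actions/upload-artifact@v2
  if: failure()                          # archive reports only when a test step failed
  with:
    name: idmtools_core_test_results
    path: idmtools_core/tests/reports/   # a directory path uploads every file under it
```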
diff --git a/.github/workflows/run-all-test-ubuntu-setup.yml b/.github/workflows/run-all-test-ubuntu-stage-setup.yml
similarity index 50%
rename from .github/workflows/run-all-test-ubuntu-setup.yml
rename to .github/workflows/run-all-test-ubuntu-stage-setup.yml
index 30520b8a6..94bf8f7c8 100644
--- a/.github/workflows/run-all-test-ubuntu-setup.yml
+++ b/.github/workflows/run-all-test-ubuntu-stage-setup.yml
@@ -1,17 +1,20 @@
-# run setup-dev to install packages and run smoke test
-# trigger by push or pull_request with commit message: "Run smoke test!"
+# install staging packages and run all tests
+# triggered by push or pull_request with commit message: "Run tests!".
-name: "run-all-test-ubuntu-setup"
-on: [push, pull_request]
+name: "run-setup-dev-all-tests"
+on: [ push, pull_request ]
jobs:
- run-smoke-test-ubuntu-setup:
+ run-all-test-ubuntu-stage:
runs-on: ${{ matrix.os }}
strategy:
matrix:
- os: [ubuntu-latest]
- python-version: [3.6, 3.7, 3.8]
- if: "contains(github.event.head_commit.message, 'Run all u test!')"
+ os: [ ubuntu-latest ]
+ python-version: [3.7]
+ env:
+ bamboo_user: ${{ secrets.PYPI_STAGING_USERNAME }}
+ bamboo_password: ${{ secrets.PYPI_STAGING_PASSWORD }}
+ if: "contains(github.event.head_commit.message, 'Run tests!')"
steps:
- name: Check out Git repository
uses: actions/checkout@v2
@@ -22,11 +25,9 @@ jobs:
architecture: x64
- name: Install Python dependencies
run: |
- pip install idm-buildtools flake8 wheel --index-url=https://packages.idmod.org/api/pypi/pypi-production/simple
- - name: Run setup-dev
- env:
- bamboo_UserArtifactory: ${{ secrets.PYPI_STAGING_USERNAME }}
- bamboo_PasswordArtifactory: ${{ secrets.PYPI_STAGING_PASSWORD }}
+ pip install idm-buildtools flake8 wheel pygit2 --index-url=https://packages.idmod.org/api/pypi/pypi-production/simple
+
+ - name: install idmtools
run: |
make setup-dev
- name: login to comps2
@@ -35,53 +36,56 @@ jobs:
env:
COMPS_USER: ${{ secrets.COMPS_USER }}
COMPS_PASSWORD: ${{ secrets.COMPS_PASSWORD }}
+ - name: login to docker staging
+ run: |
+ echo '${{ env.bamboo_password }}' | docker login idm-docker-staging.packages.idmod.org --username ${{ env.bamboo_user }} --password-stdin
- name: run idmtools_cli tests
run: |
- cd idmtools_cli/tests
- py.test --durations=3 -v --junitxml=test_results.xml
- - name: Upload idmtools_cli test results
+ cd idmtools_cli
+ PARALLEL_TEST_COUNT=2 make test-all
+ - name: Upload idmtools_cli all test results
uses: actions/upload-artifact@v2
if: failure()
with:
name: idmtools_cli_test_results
- path: idmtools_cli/tests/test_results.xml
+ path: idmtools_cli/tests/reports/
- name: run idmtools_core tests
run: |
- cd idmtools_core/tests
- py.test --durations=3 -v --junitxml=test_results.xml
- - name: Upload idmtools_core test results
+ cd idmtools_core
+ PARALLEL_TEST_COUNT=2 make test-all
+ - name: Upload idmtools_core all test results
uses: actions/upload-artifact@v2
if: failure()
with:
name: idmtools_core_test_results
- path: idmtools_core/tests/test_results.xml
+ path: idmtools_core/tests/reports/
- name: run idmtools_models tests
run: |
- cd idmtools_models/tests
- py.test --durations=3 -v --junitxml=test_results.xml
- - name: Upload idmtools_models test results
+ cd idmtools_models
+ PARALLEL_TEST_COUNT=2 make test-all
+ - name: Upload idmtools_models all test results
uses: actions/upload-artifact@v2
if: failure()
with:
name: idmtools_models_test_results
- path: idmtools_models/tests/test_results.xml
+ path: idmtools_models/tests/results/
- name: run idmtools_platform_comps tests
run: |
- cd idmtools_platform_comps/tests
- py.test --durations=3 -v --junitxml=test_results.xml
+ cd idmtools_platform_comps
+ PARALLEL_TEST_COUNT=2 make test-all
- name: Upload idmtools_platform_comps test results
uses: actions/upload-artifact@v2
if: failure()
with:
name: idmtools_platform_comps_test_results
- path: idmtools_platform_comps/tests/test_results.xml
+ path: idmtools_platform_comps/tests/results/
- name: run idmtools_platform_local tests
run: |
- cd idmtools_platform_local/tests
- py.test --durations=3 -v --junitxml=test_results.xml
- - name: Upload idmtools_platform_local test results
+ cd idmtools_platform_local
+ make test-all
+ - name: Upload idmtools_platform_local all test results
uses: actions/upload-artifact@v2
if: failure()
with:
name: idmtools_platform_local_test_results
- path: idmtools_platform_local/tests/test_results.xml
\ No newline at end of file
+ path: idmtools_platform_local/tests/results/
\ No newline at end of file
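
# This workflow is gated on the commit message rather than on changed paths:
# the job-level `if:` short-circuits the whole job unless the head commit of
# the push or pull request opts in. A minimal sketch of the gating used here:
```yaml
jobs:
  run-all-test-ubuntu-stage:
    runs-on: ubuntu-latest
    # The job is skipped entirely unless the triggering commit message
    # contains the opt-in phrase.
    if: "contains(github.event.head_commit.message, 'Run tests!')"
```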
diff --git a/.github/workflows/run-all-test-ubuntu-stage.yml b/.github/workflows/run-all-test-ubuntu-stage.yml
index 6bdc5fdec..976fd0a46 100644
--- a/.github/workflows/run-all-test-ubuntu-stage.yml
+++ b/.github/workflows/run-all-test-ubuntu-stage.yml
@@ -2,15 +2,15 @@
# trigger by push or pull_request with commit message: "Run ubuntu all test!".
name: "run-all-test-ubuntu-stage"
-on: [push, pull_request]
+on: [ push, pull_request ]
jobs:
run-all-test-ubuntu-stage:
runs-on: ${{ matrix.os }}
strategy:
matrix:
- os: [ubuntu-latest]
- python-version: [3.6, 3.7, 3.8]
+ os: [ ubuntu-latest ]
+ python-version: [ 3.6, 3.7, 3.8 ]
env:
bamboo_user: ${{ secrets.PYPI_STAGING_USERNAME }}
bamboo_password: ${{ secrets.PYPI_STAGING_PASSWORD }}
@@ -25,7 +25,7 @@ jobs:
architecture: x64
- name: Install Python dependencies
run: |
- pip install idm-buildtools flake8 wheel --index-url=https://packages.idmod.org/api/pypi/pypi-production/simple
+ pip install idm-buildtools flake8 wheel pygit2 --index-url=https://packages.idmod.org/api/pypi/pypi-production/simple
- name: Install dataclass
if: ${{ matrix.python-version }} == 3.6
run: |
@@ -34,9 +34,10 @@ jobs:
run: |
pip install idmtools[full] --index-url=https://${{ env.bamboo_user }}:${{ env.bamboo_password }}@packages.idmod.org/api/pypi/pypi-staging/simple
pip install idmtools-test --index-url=https://${{ env.bamboo_user }}:${{ env.bamboo_password }}@packages.idmod.org/api/pypi/pypi-staging/simple
+ pip install idmtools[test] --index-url=https://${{ env.bamboo_user }}:${{ env.bamboo_password }}@packages.idmod.org/api/pypi/pypi-staging/simple
- name: login to comps2
run: |
- python dev_scripts/create_auth_token_args.py --username $COMPS_USER --password $COMPS_PASSWORD
+ python dev_scripts/create_auth_token_args.py --username $COMPS_USER --password $COMPS_PASSWORD
env:
COMPS_USER: ${{ secrets.COMPS_USER }}
COMPS_PASSWORD: ${{ secrets.COMPS_PASSWORD }}
@@ -45,52 +46,85 @@ jobs:
echo '${{ env.bamboo_password }}' | docker login idm-docker-staging.packages.idmod.org --username ${{ env.bamboo_user }} --password-stdin
- name: run idmtools_cli tests
run: |
- cd idmtools_cli/tests
- py.test --durations=3 -v --junitxml=test_results.xml
- - name: Upload idmtools_cli test results
+ cd idmtools_cli
+ PARALLEL_TEST_COUNT=2 make test-all
+ - name: Upload idmtools_cli all test results
uses: actions/upload-artifact@v2
if: failure()
with:
name: idmtools_cli_test_results
- path: idmtools_cli/tests/test_results.xml
+ path: idmtools_cli/tests/reports/
- name: run idmtools_core tests
run: |
- cd idmtools_core/tests
- py.test --durations=3 -v --junitxml=test_results.xml
- - name: Upload idmtools_core test results
+ cd idmtools_core
+ PARALLEL_TEST_COUNT=2 make test-all
+ - name: Upload idmtools_core all test results
uses: actions/upload-artifact@v2
if: failure()
with:
name: idmtools_core_test_results
- path: idmtools_core/tests/test_results.xml
+ path: idmtools_core/tests/reports/
- name: run idmtools_models tests
run: |
- cd idmtools_models/tests
- py.test --durations=3 -v --junitxml=test_results.xml
- - name: Upload idmtools_models test results
+ cd idmtools_models
+ PARALLEL_TEST_COUNT=2 make test-all
+ - name: Upload idmtools_models all test results
uses: actions/upload-artifact@v2
if: failure()
with:
name: idmtools_models_test_results
- path: idmtools_models/tests/test_results.xml
+ path: idmtools_models/tests/results/
- name: run idmtools_platform_comps tests
run: |
- cd idmtools_platform_comps/tests
- py.test --durations=3 -v --junitxml=test_results.xml
+ cd idmtools_platform_comps
+ PARALLEL_TEST_COUNT=2 make test-all
- name: Upload idmtools_platform_comps test results
uses: actions/upload-artifact@v2
if: failure()
with:
name: idmtools_platform_comps_test_results
- path: idmtools_platform_comps/tests/test_results.xml
+ path: idmtools_platform_comps/tests/results/
+
+ run-local:
+ runs-on: ${{ matrix.os }}
+ strategy:
+ matrix:
+ os: [ ubuntu-latest ]
+ python-version: [ 3.7 ]
+ env:
+ bamboo_user: ${{ secrets.PYPI_STAGING_USERNAME }}
+ bamboo_password: ${{ secrets.PYPI_STAGING_PASSWORD }}
+ if: "contains(github.event.head_commit.message, 'Run ubuntu all test!')"
+ steps:
+ - name: Check out Git repository
+ uses: actions/checkout@v2
+ - name: ${{ matrix.os }} Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v2
+ with:
+ python-version: ${{ matrix.python-version }}
+ architecture: x64
+ - name: Install Python dependencies
+ run: |
+ pip install idm-buildtools flake8 wheel pygit2 --index-url=https://packages.idmod.org/api/pypi/pypi-production/simple
+ - name: Install dataclass
+ if: matrix.python-version == 3.6
+ run: |
+ pip install dataclasses
+ - name: download idmtools packages from staging jfrog
+ run: |
+ pip install idmtools[full] --index-url=https://${{ env.bamboo_user }}:${{ env.bamboo_password }}@packages.idmod.org/api/pypi/pypi-staging/simple
+ pip install idmtools-test --index-url=https://${{ env.bamboo_user }}:${{ env.bamboo_password }}@packages.idmod.org/api/pypi/pypi-staging/simple
+ pip install idmtools[test] --index-url=https://${{ env.bamboo_user }}:${{ env.bamboo_password }}@packages.idmod.org/api/pypi/pypi-staging/simple
+ - name: login to docker staging
+ run: |
+ echo '${{ env.bamboo_password }}' | docker login idm-docker-staging.packages.idmod.org --username ${{ env.bamboo_user }} --password-stdin
- name: run idmtools_platform_local tests
run: |
- cd idmtools_platform_local/tests
- export DOCKER_REPO=idm-docker-staging
- py.test --durations=3 -v --junitxml=test_results.xml
- - name: Upload idmtools_platform_local test results
+ cd idmtools_platform_local
+ make test-all
+ - name: Upload idmtools_platform_local all test results
uses: actions/upload-artifact@v2
if: failure()
with:
name: idmtools_platform_local_test_results
- path: idmtools_platform_local/tests/test_results.xml
\ No newline at end of file
+ path: idmtools_platform_local/tests/results/
\ No newline at end of file
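
# A recurring pitfall in these workflows: writing
# `if: ${{ matrix.python-version }} == 3.6` expands the template first, leaving
# the literal string `3.7 == 3.6`, which is non-empty and therefore truthy, so
# the guard never skips the step. Keeping the whole comparison inside one
# expression restores the intended behavior; a sketch:
```yaml
- name: Install dataclass
  # Correct form: the comparison is evaluated as one expression and yields
  # a boolean, instead of being rendered into an always-truthy string.
  if: matrix.python-version == 3.6
  run: |
    pip install dataclasses
```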
diff --git a/.github/workflows/run-all-test-win-staging.yml b/.github/workflows/run-all-test-win-staging.yml
deleted file mode 100644
index d7c555ba3..000000000
--- a/.github/workflows/run-all-test-win-staging.yml
+++ /dev/null
@@ -1,95 +0,0 @@
-# run setup-dev to install packages and run smoke test
-# trigger by push or pull_request with commit message: "Run all win test!"
-
-name: "run-all-test-win-staging"
-on: [push, pull_request]
-
-jobs:
- run-all-test-win-staging:
- runs-on: ${{ matrix.os }}
- strategy:
- matrix:
- os: [windows-latest]
- python-version: [3.6, 3.7, 3.8]
- env:
- bamboo_user: ${{ secrets.PYPI_STAGING_USERNAME }}
- bamboo_password: ${{ secrets.PYPI_STAGING_PASSWORD }}
- if: "contains(github.event.head_commit.message, 'Run all win test!')"
- steps:
- - name: Check out Git repository
- uses: actions/checkout@v2
- - name: ${{ matrix.os }} Python ${{ matrix.python-version }}
- uses: actions/setup-python@v2
- with:
- python-version: ${{ matrix.python-version }}
- architecture: x64
- - name: upgrade pip
- run: |
- python -m pip install --upgrade pip
- - name: Install Python dependencies
- run: |
- pip install idm-buildtools flake8 coloredlogs wheel --index-url=https://packages.idmod.org/api/pypi/pypi-production/simple
- - name: Install dataclass
- if: ${{ matrix.python-version }} == 3.6
- run: |
- pip install dataclasses
- - name: download idmtools packages from staging jfrog
- run: |
- pip install idmtools[full] --index-url=https://${{ env.bamboo_user }}:${{ env.bamboo_password }}@packages.idmod.org/api/pypi/pypi-staging/simple --verbose
- pip install idmtools-test --index-url=https://${{ env.bamboo_user }}:${{ env.bamboo_password }}@packages.idmod.org/api/pypi/pypi-staging/simple --verbose
- - name: login to comps2
- run: |
- python dev_scripts/create_auth_token_args.py --username ${{ secrets.COMPS_USER }} --password ${{ secrets.COMPS_PASSWORD }}
- - name: login to docker staging
- run: |
- echo '${{ env.bamboo_password }}' | docker login idm-docker-staging.packages.idmod.org --username ${{ env.bamboo_user }} --password-stdin
- - name: run idmtools_cli tests
- run: |
- cd idmtools_cli/tests
- py.test --durations=3 -v --junitxml=test_results.xml
- - name: Upload idmtools_cli test results
- uses: actions/upload-artifact@v2
- if: failure()
- with:
- name: idmtools_cli_test_results
- path: idmtools_cli/tests/test_results.xml
- - name: run idmtools_core tests
- run: |
- cd idmtools_core/tests
- py.test --durations=3 -v --junitxml=test_results.xml
- - name: Upload idmtools_core test results
- uses: actions/upload-artifact@v2
- if: failure()
- with:
- name: idmtools_core_test_results
- path: idmtools_core/tests/test_results.xml
- - name: run idmtools_models tests
- run: |
- cd idmtools_models/tests
- py.test --durations=3 -v --junitxml=test_results.xml
- - name: Upload idmtools_models test results
- uses: actions/upload-artifact@v2
- if: failure()
- with:
- name: idmtools_models_test_results
- path: idmtools_models/tests/test_results.xml
- - name: run idmtools_platform_comps tests
- run: |
- cd idmtools_platform_comps/tests
- py.test --durations=3 -v --junitxml=test_results.xml
- - name: Upload idmtools_platform_comps test results
- uses: actions/upload-artifact@v2
- if: failure()
- with:
- name: idmtools_platform_comps_test_results
- path: idmtools_platform_comps/tests/test_results.xml
-# - name: run idmtools_platform_local tests
-# run: |
-# cd idmtools_platform_local/tests
-# py.test --durations=3 -v --junitxml=test_results.xml
-# - name: Upload idmtools_platform_local test results
-# uses: actions/upload-artifact@v2
-# if: failure()
-# with:
-# name: idmtools_platform_local_test_results
-# path: idmtools_platform_local/tests/test_results.xml
\ No newline at end of file
diff --git a/.github/workflows/run-master-prod-win-test-curl.yml b/.github/workflows/run-master-prod-win-test-curl.yml
deleted file mode 100644
index d09d074d4..000000000
--- a/.github/workflows/run-master-prod-win-test-curl.yml
+++ /dev/null
@@ -1,99 +0,0 @@
-# This workflows will download idmtools nightly packages from production artifactory then run tests with curl command
-#
-# curl command from your cmd:
-# curl -XPOST -u "username:githubPAT" -H "Accept: application/vnd.github.everest-preview+json" \
-# -H "Content-Type: application/json" \
-# --data "{\"event_type\": \"test_master_prod_win\"}" \
-# https://api.github.com/repos/InstituteforDiseaseModeling/idmtools/dispatches
-#
-# Note, this test will checkout 'master' branch in windows
-
-name: "run-master-prod-win-test-curl"
-on:
- repository_dispatch:
- # Manually trigger with curl or Postman POST
- types: [test_master_prod_win]
-
-jobs:
- run-all-test-win-prod-curl:
- runs-on: ${{ matrix.os }}
- strategy:
- matrix:
- os: [windows-latest]
- python-version: [3.6, 3.7, 3.8]
- if: github.repository == 'InstituteforDiseaseModeling/idmtools'
- steps:
- - name: Check out Git repository
- uses: actions/checkout@v2
- - name: ${{ matrix.os }} Python ${{ matrix.python-version }}
- uses: actions/setup-python@v2
- with:
- python-version: ${{ matrix.python-version }}
- architecture: x64
- - name: upgrade pip
- run: |
- python -m pip install --upgrade pip
- - name: Install Python dependencies
- run: |
- pip install idm-buildtools flake8 wheel --index-url=https://packages.idmod.org/api/pypi/pypi-production/simple
- - name: Install dataclass
- if: ${{ matrix.python-version }} == 3.6
- run: |
- pip install dataclasses
- - name: download idmtools packages from production jfrog
- run: |
- pip install idmtools[full] --index-url=https://packages.idmod.org/api/pypi/pypi-production/simple
- pip install idmtools-test --index-url=https://packages.idmod.org/api/pypi/pypi-production/simple
- - name: login to comps2
- run: |
- python dev_scripts/create_auth_token_args.py --username ${{ secrets.COMPS_USER }} --password ${{ secrets.COMPS_PASSWORD }}
- - name: run idmtools_cli tests
- run: |
- cd idmtools_cli/tests
- py.test --durations=3 -v --junitxml=test_results.xml
- - name: Upload idmtools_cli test results
- uses: actions/upload-artifact@v2
- if: failure()
- with:
- name: idmtools_cli_test_results
- path: idmtools_cli/tests/test_results.xml
- - name: run idmtools_core tests
- run: |
- cd idmtools_core/tests
- py.test --durations=3 -v --junitxml=test_results.xml
- - name: Upload idmtools_core test results
- uses: actions/upload-artifact@v2
- if: failure()
- with:
- name: idmtools_core_test_results
- path: idmtools_core/tests/test_results.xml
- - name: run idmtools_models tests
- run: |
- cd idmtools_models/tests
- py.test --durations=3 -v --junitxml=test_results.xml
- - name: Upload idmtools_models test results
- uses: actions/upload-artifact@v2
- if: failure()
- with:
- name: idmtools_models_test_results
- path: idmtools_models/tests/test_results.xml
- - name: run idmtools_platform_comps tests
- run: |
- cd idmtools_platform_comps/tests
- py.test --durations=3 -v --junitxml=test_results.xml
- - name: Upload idmtools_platform_comps test results
- uses: actions/upload-artifact@v2
- if: failure()
- with:
- name: idmtools_platform_comps_test_results
- path: idmtools_platform_comps/tests/test_results.xml
-# - name: run idmtools_platform_local tests
-# run: |
-# cd idmtools_platform_local/tests
-# py.test --durations=3 -v --junitxml=test_results.xml
-# - name: Upload idmtools_platform_local test results
-# uses: actions/upload-artifact@v2
-# if: failure()
-# with:
-# name: idmtools_platform_local_test_results
-# path: idmtools_platform_local/tests/test_results.xml
\ No newline at end of file
diff --git a/.github/workflows/run-nightly-build-smoke-test-cron.yml b/.github/workflows/run-nightly-build-smoke-test-cron.yml
index f6b4a62cd..d63e275a3 100644
--- a/.github/workflows/run-nightly-build-smoke-test-cron.yml
+++ b/.github/workflows/run-nightly-build-smoke-test-cron.yml
@@ -40,6 +40,7 @@ jobs:
run: |
pip install idmtools[full] --index-url=https://${{ env.bamboo_user }}:${{ env.bamboo_password }}@packages.idmod.org/api/pypi/pypi-staging/simple
pip install idmtools-test --index-url=https://${{ env.bamboo_user }}:${{ env.bamboo_password }}@packages.idmod.org/api/pypi/pypi-staging/simple
+ pip install idmtools[test] --index-url=https://${{ env.bamboo_user }}:${{ env.bamboo_password }}@packages.idmod.org/api/pypi/pypi-staging/simple
- name: login to comps2
run: |
python dev_scripts/create_auth_token_args.py --username $COMPS_USER --password $COMPS_PASSWORD
@@ -49,54 +50,53 @@ jobs:
- name: login to docker staging
run: |
echo '${{ env.bamboo_password }}' | docker login idm-docker-staging.packages.idmod.org --username ${{ env.bamboo_user }} --password-stdin
- - name: run idmtools_cli smoke tests
+ - name: run idmtools_cli tests
run: |
- cd idmtools_cli/tests
- py.test --durations=3 -v --junitxml=test_results.xml -m "smoke"
+ cd idmtools_cli
+ PARALLEL_TEST_COUNT=2 make test-smoke
- name: Upload idmtools_cli smoke test results
uses: actions/upload-artifact@v2
if: failure()
with:
name: idmtools_cli_test_results
- path: idmtools_cli/tests/test_results.xml
+ path: idmtools_cli/tests/reports/
- name: run idmtools_core smoke tests
run: |
- cd idmtools_core/tests
- py.test --durations=3 -v --junitxml=test_results.xml -m "smoke"
+ cd idmtools_core
+ PARALLEL_TEST_COUNT=2 make test-smoke
- name: Upload idmtools_core smoke test results
uses: actions/upload-artifact@v2
if: failure()
with:
name: idmtools_core_test_results
- path: idmtools_core/tests/test_results.xml
- - name: run idmtools_models tests
+ path: idmtools_core/tests/reports/
+ - name: run idmtools_models smoke tests
run: |
- cd idmtools_models/tests
- py.test --durations=3 -v --junitxml=test_results.xml -m "smoke"
+ cd idmtools_models
+ PARALLEL_TEST_COUNT=2 make test-smoke
- name: Upload idmtools_models smoke test results
uses: actions/upload-artifact@v2
if: failure()
with:
name: idmtools_models_test_results
- path: idmtools_models/tests/test_results.xml
- - name: run idmtools_platform_comps tests
+ path: idmtools_models/tests/results/
+ - name: run idmtools_platform_comps smoke tests
run: |
- cd idmtools_platform_comps/tests
- py.test --durations=3 -v --junitxml=test_results.xml -m "smoke"
- - name: Upload idmtools_platform_comps test results
+ cd idmtools_platform_comps
+ PARALLEL_TEST_COUNT=2 make test-smoke
+ - name: Upload idmtools_platform_comps smoke test results
uses: actions/upload-artifact@v2
if: failure()
with:
name: idmtools_platform_comps_test_results
- path: idmtools_platform_comps/tests/test_results.xml
+ path: idmtools_platform_comps/tests/results/
- name: run idmtools_platform_local smoke tests
run: |
- cd idmtools_platform_local/tests
- export DOCKER_REPO=idm-docker-staging
- py.test --durations=3 -v --junitxml=test_results.xml -m "smoke"
+ cd idmtools_platform_local
+ make test-smoke
- name: Upload idmtools_platform_local smoke test results
uses: actions/upload-artifact@v2
if: failure()
with:
name: idmtools_platform_local_test_results
- path: idmtools_platform_local/tests/test_results.xml
\ No newline at end of file
+ path: idmtools_platform_local/tests/results/
\ No newline at end of file
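
# The nightly cron now delegates marker selection to `make test-smoke` instead
# of passing `-m "smoke"` to pytest directly. A sketch of the assumed
# equivalence:
```yaml
- name: run idmtools_core smoke tests
  run: |
    cd idmtools_core
    # Assumption: test-smoke wraps roughly the old invocation,
    #   py.test -m smoke --junitxml=tests/reports/test_results.xml
    # while writing its reports under tests/reports/.
    PARALLEL_TEST_COUNT=2 make test-smoke
```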
diff --git a/.github/workflows/run-release-build-test-curl.yml b/.github/workflows/run-release-build-test-curl.yml
deleted file mode 100644
index cd3134f50..000000000
--- a/.github/workflows/run-release-build-test-curl.yml
+++ /dev/null
@@ -1,99 +0,0 @@
-# This workflows will download idmtools nightly packages from production artifactory then run tests with curl command
-#
-# curl command from your cmd:
-# curl -XPOST -u "username:githubPAT" -H "Accept: application/vnd.github.everest-preview+json" \
-# -H "Content-Type: application/json" \
-# --data "{\"event_type\": \"test_release_build\"}" \
-# https://api.github.com/repos/InstituteforDiseaseModeling/idmtools/dispatches
-#
-# Note, this test will checkout 'master' branch
-
-name: "run-release-build-test-curl"
-on:
- repository_dispatch:
- # Manually trigger with above curl or Postman POST
- types: [test_release_build]
-
-jobs:
- run-release-build-test-curl:
- if: github.repository == 'InstituteforDiseaseModeling/idmtools'
- runs-on: ${{ matrix.os }}
- strategy:
- matrix:
- os: [ubuntu-latest]
- python-version: [3.6, 3.7]
- steps:
- - name: Check out Git repository
- uses: actions/checkout@v2
- - name: ${{ matrix.os }} Python ${{ matrix.python-version }}
- uses: actions/setup-python@v2
- with:
- python-version: ${{ matrix.python-version }}
- architecture: x64
- - name: Install Python dependencies
- run: |
- pip install idm-buildtools flake8 wheel --index-url=https://packages.idmod.org/api/pypi/pypi-production/simple
- - name: Install dataclass
- if: ${{ matrix.python-version }} == 3.6
- run: |
- pip install dataclasses
- - name: download idmtools packages from production jfrog
- run: |
- pip install idmtools[full] --index-url=https://packages.idmod.org/api/pypi/pypi-production/simple
- pip install idmtools-test --index-url=https://packages.idmod.org/api/pypi/pypi-production/simple
- - name: login to comps2
- run: |
- python dev_scripts/create_auth_token_args.py --username $COMPS_USER --password $COMPS_PASSWORD
- env:
- COMPS_USER: ${{ secrets.COMPS_USER }}
- COMPS_PASSWORD: ${{ secrets.COMPS_PASSWORD }}
- - name: run idmtools_cli tests
- run: |
- cd idmtools_cli/tests
- py.test --durations=3 -v --junitxml=test_results.xml
- - name: Upload idmtools_cli test results
- uses: actions/upload-artifact@v2
- if: failure()
- with:
- name: idmtools_cli_test_results
- path: idmtools_cli/tests/test_results.xml
- - name: run idmtools_core tests
- run: |
- cd idmtools_core/tests
- py.test --durations=3 -v --junitxml=test_results.xml
- - name: Upload idmtools_core test results
- uses: actions/upload-artifact@v2
- if: failure()
- with:
- name: idmtools_core_test_results
- path: idmtools_core/tests/test_results.xml
- - name: run idmtools_models tests
- run: |
- cd idmtools_models/tests
- py.test --durations=3 -v --junitxml=test_results.xml
- - name: Upload idmtools_models test results
- uses: actions/upload-artifact@v2
- if: failure()
- with:
- name: idmtools_models_test_results
- path: idmtools_models/tests/test_results.xml
- - name: run idmtools_platform_comps tests
- run: |
- cd idmtools_platform_comps/tests
- py.test --durations=3 -v --junitxml=test_results.xml
- - name: Upload idmtools_platform_comps test results
- uses: actions/upload-artifact@v2
- if: failure()
- with:
- name: idmtools_platform_comps_test_results
- path: idmtools_platform_comps/tests/test_results.xml
- - name: run idmtools_platform_local tests
- run: |
- cd idmtools_platform_local/tests
- py.test --durations=3 -v --junitxml=test_results.xml
- - name: Upload idmtools_platform_local test results
- uses: actions/upload-artifact@v2
- if: failure()
- with:
- name: idmtools_platform_local_test_results
- path: idmtools_platform_local/tests/test_results.xml
\ No newline at end of file
diff --git a/.github/workflows/run-release-prod-test.yml b/.github/workflows/run-release-prod-test.yml
index c54462cb6..c5c60dd8c 100644
--- a/.github/workflows/run-release-prod-test.yml
+++ b/.github/workflows/run-release-prod-test.yml
@@ -1,13 +1,14 @@
-name: "run-release-prod-test"
-on: [push, pull_request]
+# This GA can be triggered with commit message "Run prod ubuntu all test!". It installs prod packages with code from any branch.
+name: "run-all-test-ubuntu-prod"
+on: [ push, pull_request ]
jobs:
- run-release-prod-test:
+ run-all-test-ubuntu-prod:
runs-on: ${{ matrix.os }}
strategy:
matrix:
- os: [ubuntu-latest]
- python-version: [3.6, 3.7, 3.8]
+ os: [ ubuntu-latest ]
+ python-version: [ 3.6, 3.7, 3.8 ]
if: "contains(github.event.head_commit.message, 'Run prod ubuntu all test!')"
steps:
- name: Check out Git repository
@@ -19,7 +20,7 @@ jobs:
architecture: x64
- name: Install Python dependencies
run: |
- pip install idm-buildtools flake8 wheel --index-url=https://packages.idmod.org/api/pypi/pypi-production/simple
+ pip install idm-buildtools flake8 wheel pygit2 --index-url=https://packages.idmod.org/api/pypi/pypi-production/simple
- name: Install dataclass
if: ${{ matrix.python-version }} == 3.6
run: |
@@ -28,6 +29,7 @@ jobs:
run: |
pip install idmtools[full] --index-url=https://packages.idmod.org/api/pypi/pypi-production/simple
pip install idmtools-test --index-url=https://packages.idmod.org/api/pypi/pypi-production/simple
+ pip install idmtools[test] --index-url=https://packages.idmod.org/api/pypi/pypi-production/simple
- name: login to comps2
run: |
python dev_scripts/create_auth_token_args.py --username $COMPS_USER --password $COMPS_PASSWORD
@@ -36,51 +38,51 @@ jobs:
COMPS_PASSWORD: ${{ secrets.COMPS_PASSWORD }}
- name: run idmtools_cli tests
run: |
- cd idmtools_cli/tests
- py.test --durations=3 -v --junitxml=test_results.xml
- - name: Upload idmtools_cli test results
+ cd idmtools_cli
+ PARALLEL_TEST_COUNT=2 make test-all
+ - name: Upload idmtools_cli all test results
uses: actions/upload-artifact@v2
if: failure()
with:
name: idmtools_cli_test_results
- path: idmtools_cli/tests/test_results.xml
+ path: idmtools_cli/tests/reports/
- name: run idmtools_core tests
run: |
- cd idmtools_core/tests
- py.test --durations=3 -v --junitxml=test_results.xml
- - name: Upload idmtools_core test results
+ cd idmtools_core
+ PARALLEL_TEST_COUNT=2 make test-all
+ - name: Upload idmtools_core all test results
uses: actions/upload-artifact@v2
if: failure()
with:
name: idmtools_core_test_results
- path: idmtools_core/tests/test_results.xml
+ path: idmtools_core/tests/reports/
- name: run idmtools_models tests
run: |
- cd idmtools_models/tests
- py.test --durations=3 -v --junitxml=test_results.xml
- - name: Upload idmtools_models test results
+ cd idmtools_models
+ PARALLEL_TEST_COUNT=2 make test-all
+ - name: Upload idmtools_models all test results
uses: actions/upload-artifact@v2
if: failure()
with:
name: idmtools_models_test_results
- path: idmtools_models/tests/test_results.xml
+ path: idmtools_models/tests/results/
- name: run idmtools_platform_comps tests
run: |
- cd idmtools_platform_comps/tests
- py.test --durations=3 -v --junitxml=test_results.xml
+ cd idmtools_platform_comps
+ PARALLEL_TEST_COUNT=2 make test-all
- name: Upload idmtools_platform_comps test results
uses: actions/upload-artifact@v2
if: failure()
with:
name: idmtools_platform_comps_test_results
- path: idmtools_platform_comps/tests/test_results.xml
+ path: idmtools_platform_comps/tests/results/
- name: run idmtools_platform_local tests
run: |
- cd idmtools_platform_local/tests
- py.test --durations=3 -v --junitxml=test_results.xml
- - name: Upload idmtools_platform_local test results
+ cd idmtools_platform_local
+ make test-all
+ - name: Upload idmtools_platform_local all test results
uses: actions/upload-artifact@v2
if: failure()
with:
name: idmtools_platform_local_test_results
- path: idmtools_platform_local/tests/test_results.xml
\ No newline at end of file
+ path: idmtools_platform_local/tests/results/
\ No newline at end of file
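
# The COMPS login step is unchanged across these workflows, but the pattern is
# worth spelling out: secrets are mapped into step-scoped environment variables
# and referenced by the shell, so credential values live only in repository
# secrets (which the runner masks in logs). As it appears in these workflows:
```yaml
- name: login to comps2
  run: |
    python dev_scripts/create_auth_token_args.py --username $COMPS_USER --password $COMPS_PASSWORD
  env:
    COMPS_USER: ${{ secrets.COMPS_USER }}        # injected at runtime, never committed
    COMPS_PASSWORD: ${{ secrets.COMPS_PASSWORD }}
```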
diff --git a/.github/workflows/run-release-prod-win-test.yml b/.github/workflows/run-release-prod-win-test.yml
deleted file mode 100644
index e51e1661b..000000000
--- a/.github/workflows/run-release-prod-win-test.yml
+++ /dev/null
@@ -1,83 +0,0 @@
-name: "run-release-prod-win-test"
-on: [push, pull_request]
-
-jobs:
- run-release-prod-win-test:
- runs-on: ${{ matrix.os }}
- strategy:
- matrix:
- os: [windows-latest]
- python-version: [3.6, 3.7, 3.8]
- if: "contains(github.event.head_commit.message, 'Run prod win all test!')"
- steps:
- - name: Check out Git repository
- uses: actions/checkout@v2
- - name: ${{ matrix.os }} Python ${{ matrix.python-version }}
- uses: actions/setup-python@v2
- with:
- python-version: ${{ matrix.python-version }}
- architecture: x64
- - name: Install Python dependencies
- run: |
- pip install idm-buildtools flake8 wheel --index-url=https://packages.idmod.org/api/pypi/pypi-production/simple
- - name: Install dataclass
- if: ${{ matrix.python-version }} == 3.6
- run: |
- pip install dataclasses
- - name: download idmtools packages from production jfrog
- run: |
- pip install idmtools[full] --index-url=https://packages.idmod.org/api/pypi/pypi-production/simple
- pip install idmtools-test --index-url=https://packages.idmod.org/api/pypi/pypi-production/simple
- - name: login to comps2
- run: |
- python dev_scripts/create_auth_token_args.py --username ${{ secrets.COMPS_USER }} --password ${{ secrets.COMPS_PASSWORD }}
- - name: run idmtools_cli tests
- run: |
- cd idmtools_cli/tests
- py.test --durations=3 -v --junitxml=test_results.xml
- - name: Upload idmtools_cli test results
- uses: actions/upload-artifact@v2
- if: failure()
- with:
- name: idmtools_cli_test_results
- path: idmtools_cli/tests/test_results.xml
- - name: run idmtools_core tests
- run: |
- cd idmtools_core/tests
- py.test --durations=3 -v --junitxml=test_results.xml
- - name: Upload idmtools_core test results
- uses: actions/upload-artifact@v2
- if: failure()
- with:
- name: idmtools_core_test_results
- path: idmtools_core/tests/test_results.xml
- - name: run idmtools_models tests
- run: |
- cd idmtools_models/tests
- py.test --durations=3 -v --junitxml=test_results.xml
- - name: Upload idmtools_models test results
- uses: actions/upload-artifact@v2
- if: failure()
- with:
- name: idmtools_models_test_results
- path: idmtools_models/tests/test_results.xml
- - name: run idmtools_platform_comps tests
- run: |
- cd idmtools_platform_comps/tests
- py.test --durations=3 -v --junitxml=test_results.xml
- - name: Upload idmtools_platform_comps test results
- uses: actions/upload-artifact@v2
- if: failure()
- with:
- name: idmtools_platform_comps_test_results
- path: idmtools_platform_comps/tests/test_results.xml
-# - name: run idmtools_platform_local tests
-# run: |
-# cd idmtools_platform_local/tests
-# py.test --durations=3 -v --junitxml=test_results.xml
-# - name: Upload idmtools_platform_local test results
-# uses: actions/upload-artifact@v2
-# if: failure()
-# with:
-# name: idmtools_platform_local_test_results
-# path: idmtools_platform_local/tests/test_results.xml
\ No newline at end of file
diff --git a/.github/workflows/run-smoke-test-ubuntu-setup.yml b/.github/workflows/run-smoke-test-ubuntu-setup.yml
deleted file mode 100644
index 783ecd9e9..000000000
--- a/.github/workflows/run-smoke-test-ubuntu-setup.yml
+++ /dev/null
@@ -1,87 +0,0 @@
-# run setup-dev to install packages and run smoke test
-# trigger by push or pull_request with commit message: "Run smoke test!"
-
-name: "run-smoke-test-ubuntu-setup"
-on: [push, pull_request]
-
-jobs:
- run-smoke-test-ubuntu-setup:
- runs-on: ${{ matrix.os }}
- strategy:
- matrix:
- os: [ubuntu-latest]
- python-version: [3.6, 3.7, 3.8]
- if: "contains(github.event.head_commit.message, 'Run smoke test!')"
- steps:
- - name: Check out Git repository
- uses: actions/checkout@v2
- - name: ${{ matrix.os }} Python ${{ matrix.python-version }}
- uses: actions/setup-python@v2
- with:
- python-version: ${{ matrix.python-version }}
- architecture: x64
- - name: Install Python dependencies
- run: |
- pip install idm-buildtools flake8 wheel --index-url=https://packages.idmod.org/api/pypi/pypi-production/simple
- - name: Run setup-dev
- env:
- bamboo_UserArtifactory: ${{ secrets.PYPI_STAGING_USERNAME }}
- bamboo_PasswordArtifactory: ${{ secrets.PYPI_STAGING_PASSWORD }}
- run: |
- make setup-dev
- - name: login to comps2
- run: |
- python dev_scripts/create_auth_token_args.py --username $COMPS_USER --password $COMPS_PASSWORD
- env:
- COMPS_USER: ${{ secrets.COMPS_USER }}
- COMPS_PASSWORD: ${{ secrets.COMPS_PASSWORD }}
- - name: run idmtools_cli smoke tests
- run: |
- cd idmtools_cli/tests
- py.test --durations=3 -v --junitxml=test_results.xml -m "smoke"
- - name: Upload idmtools_cli smoke test results
- uses: actions/upload-artifact@v2
- if: failure()
- with:
- name: idmtools_cli_test_results
- path: idmtools_cli/tests/test_results.xml
- - name: run idmtools_core smoke tests
- run: |
- cd idmtools_core/tests
- py.test --durations=3 -v --junitxml=test_results.xml -m "smoke"
- - name: Upload idmtools_core smoke test results
- uses: actions/upload-artifact@v2
- if: failure()
- with:
- name: idmtools_core_test_results
- path: idmtools_core/tests/test_results.xml
- - name: run idmtools_models tests
- run: |
- cd idmtools_models/tests
- py.test --durations=3 -v --junitxml=test_results.xml -m "smoke"
- - name: Upload idmtools_models smoke test results
- uses: actions/upload-artifact@v2
- if: failure()
- with:
- name: idmtools_models_test_results
- path: idmtools_models/tests/test_results.xml
- - name: run idmtools_platform_comps tests
- run: |
- cd idmtools_platform_comps/tests
- py.test --durations=3 -v --junitxml=test_results.xml -m "smoke"
- - name: Upload idmtools_platform_comps test results
- uses: actions/upload-artifact@v2
- if: failure()
- with:
- name: idmtools_platform_comps_test_results
- path: idmtools_platform_comps/tests/test_results.xml
- - name: run idmtools_platform_local smoke tests
- run: |
- cd idmtools_platform_local/tests
- py.test --durations=3 -v --junitxml=test_results.xml -m "smoke"
- - name: Upload idmtools_platform_local smoke test results
- uses: actions/upload-artifact@v2
- if: failure()
- with:
- name: idmtools_platform_local_test_results
- path: idmtools_platform_local/tests/test_results.xml
diff --git a/.github/workflows/run-smoke-test-ubuntu-stage.yml b/.github/workflows/run-smoke-test-ubuntu-stage.yml
index 761aa17dd..a64821239 100644
--- a/.github/workflows/run-smoke-test-ubuntu-stage.yml
+++ b/.github/workflows/run-smoke-test-ubuntu-stage.yml
@@ -25,7 +25,7 @@ jobs:
architecture: x64
- name: Install Python dependencies
run: |
- pip install idm-buildtools flake8 wheel --index-url=https://packages.idmod.org/api/pypi/pypi-production/simple
+ pip install idm-buildtools flake8 wheel pygit2 --index-url=https://packages.idmod.org/api/pypi/pypi-production/simple
- name: Install dataclass
if: ${{ matrix.python-version }} == 3.6
run: |
@@ -34,6 +34,7 @@ jobs:
run: |
pip install idmtools[full] --index-url=https://${{ env.bamboo_user }}:${{ env.bamboo_password }}@packages.idmod.org/api/pypi/pypi-staging/simple
pip install idmtools-test --index-url=https://${{ env.bamboo_user }}:${{ env.bamboo_password }}@packages.idmod.org/api/pypi/pypi-staging/simple
+ pip install idmtools[test] --index-url=https://${{ env.bamboo_user }}:${{ env.bamboo_password }}@packages.idmod.org/api/pypi/pypi-staging/simple
- name: login to comps2
run: |
python dev_scripts/create_auth_token_args.py --username $COMPS_USER --password $COMPS_PASSWORD
@@ -43,53 +44,53 @@ jobs:
- name: login to docker staging
run: |
echo '${{ env.bamboo_password }}' | docker login idm-docker-staging.packages.idmod.org --username ${{ env.bamboo_user }} --password-stdin
- - name: run idmtools_cli smoke tests
+ - name: run idmtools_cli tests
run: |
- cd idmtools_cli/tests
- py.test --durations=3 -v --junitxml=test_results.xml -m "smoke"
+ cd idmtools_cli
+ PARALLEL_TEST_COUNT=2 make test-smoke
- name: Upload idmtools_cli smoke test results
uses: actions/upload-artifact@v2
if: failure()
with:
name: idmtools_cli_test_results
- path: idmtools_cli/tests/test_results.xml
+ path: idmtools_cli/tests/reports/
- name: run idmtools_core smoke tests
run: |
- cd idmtools_core/tests
- py.test --durations=3 -v --junitxml=test_results.xml -m "smoke"
+ cd idmtools_core
+ PARALLEL_TEST_COUNT=2 make test-smoke
- name: Upload idmtools_core smoke test results
uses: actions/upload-artifact@v2
if: failure()
with:
name: idmtools_core_test_results
- path: idmtools_core/tests/test_results.xml
- - name: run idmtools_models tests
+ path: idmtools_core/tests/reports/
+ - name: run idmtools_models smoke tests
run: |
- cd idmtools_models/tests
- py.test --durations=3 -v --junitxml=test_results.xml -m "smoke"
+ cd idmtools_models
+ PARALLEL_TEST_COUNT=2 make test-smoke
- name: Upload idmtools_models smoke test results
uses: actions/upload-artifact@v2
if: failure()
with:
name: idmtools_models_test_results
- path: idmtools_models/tests/test_results.xml
- - name: run idmtools_platform_comps tests
+ path: idmtools_models/tests/results/
+ - name: run idmtools_platform_comps smoke tests
run: |
- cd idmtools_platform_comps/tests
- py.test --durations=3 -v --junitxml=test_results.xml -m "smoke"
- - name: Upload idmtools_platform_comps test results
+ cd idmtools_platform_comps
+ PARALLEL_TEST_COUNT=2 make test-smoke
+ - name: Upload idmtools_platform_comps smoke test results
uses: actions/upload-artifact@v2
if: failure()
with:
name: idmtools_platform_comps_test_results
- path: idmtools_platform_comps/tests/test_results.xml
+ path: idmtools_platform_comps/tests/results/
- name: run idmtools_platform_local smoke tests
run: |
- cd idmtools_platform_local/tests
- py.test --durations=3 -v --junitxml=test_results.xml -m "smoke"
+ cd idmtools_platform_local
+ make test-smoke
- name: Upload idmtools_platform_local smoke test results
uses: actions/upload-artifact@v2
if: failure()
with:
name: idmtools_platform_local_test_results
- path: idmtools_platform_local/tests/test_results.xml
\ No newline at end of file
+ path: idmtools_platform_local/tests/results/
\ No newline at end of file
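
# The old local-platform smoke step exported DOCKER_REPO=idm-docker-staging
# before running pytest; the `make test-smoke` replacement drops that export.
# If the Makefile does not set the repo itself, the variable can still be
# supplied inline; a sketch under that assumption:
```yaml
- name: run idmtools_platform_local smoke tests
  run: |
    cd idmtools_platform_local
    # Assumption: test-smoke does not set DOCKER_REPO on its own, so it is
    # passed inline, as the removed `export` line used to do.
    DOCKER_REPO=idm-docker-staging make test-smoke
```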
diff --git a/.github/workflows/run-smoke-test-win-setup.yml b/.github/workflows/run-smoke-test-win-setup.yml
deleted file mode 100644
index 65646cfd4..000000000
--- a/.github/workflows/run-smoke-test-win-setup.yml
+++ /dev/null
@@ -1,87 +0,0 @@
-# run setup-dev to install packages and run smoke test
-# trigger by push or pull_request with commit message: "Run smoke test!"
-
-name: "run-smoke-test-win-setup"
-on: [push, pull_request]
-
-jobs:
- run-smoke-test-win-setup:
- runs-on: ${{ matrix.os }}
- strategy:
- matrix:
- os: [windows-latest]
- python-version: [3.6, 3.7, 3.8]
- if: "contains(github.event.head_commit.message, 'Run win setup test!')"
- steps:
- - name: Check out Git repository
- uses: actions/checkout@v2
- - name: ${{ matrix.os }} Python ${{ matrix.python-version }}
- uses: actions/setup-python@v2
- with:
- python-version: ${{ matrix.python-version }}
- architecture: x64
- - name: Install Python dependencies
- run: |
- pip install idm-buildtools flake8 wheel coloredlogs --index-url=https://packages.idmod.org/api/pypi/pypi-production/simple
- - name: Run setup-dev
- env:
- bamboo_UserArtifactory: ${{ secrets.PYPI_STAGING_USERNAME }}
- bamboo_PasswordArtifactory: ${{ secrets.PYPI_STAGING_PASSWORD }}
- run: |
- make setup-dev
- - name: login to comps2
- run: |
- python dev_scripts\create_auth_token_args.py --username $COMPS_USER --password $COMPS_PASSWORD
- env:
- COMPS_USER: ${{ secrets.COMPS_USER }}
- COMPS_PASSWORD: ${{ secrets.COMPS_PASSWORD }}
- - name: run idmtools_cli smoke tests
- run: |
- cd idmtools_cli\tests
- py.test --durations=3 -v --junitxml=test_results.xml -m "smoke"
- - name: Upload idmtools_cli smoke test results
- uses: actions/upload-artifact@v2
- if: failure()
- with:
- name: idmtools_cli_test_results
- path: idmtools_cli\tests\test_results.xml
- - name: run idmtools_core smoke tests
- run: |
- cd idmtools_core\tests
- py.test --durations=3 -v --junitxml=test_results.xml -m "smoke"
- - name: Upload idmtools_core smoke test results
- uses: actions/upload-artifact@v2
- if: failure()
- with:
- name: idmtools_core_test_results
- path: idmtools_core\tests\test_results.xml
- - name: run idmtools_models tests
- run: |
- cd idmtools_models/tests
- py.test --durations=3 -v --junitxml=test_results.xml -m "smoke"
- - name: Upload idmtools_models smoke test results
- uses: actions/upload-artifact@v2
- if: failure()
- with:
- name: idmtools_models_test_results
- path: idmtools_models\tests\test_results.xml
- - name: run idmtools_platform_comps tests
- run: |
- cd idmtools_platform_comps\tests
- py.test --durations=3 -v --junitxml=test_results.xml -m "smoke"
- - name: Upload idmtools_platform_comps test results
- uses: actions/upload-artifact@v2
- if: failure()
- with:
- name: idmtools_platform_comps_test_results
- path: idmtools_platform_comps\tests\test_results.xml
- - name: run idmtools_platform_local smoke tests
- run: |
- cd idmtools_platform_local\tests
- py.test --durations=3 -v --junitxml=test_results.xml -m "smoke"
- - name: Upload idmtools_platform_local smoke test results
- uses: actions/upload-artifact@v2
- if: failure()
- with:
- name: idmtools_platform_local_test_results
- path: idmtools_platform_local\tests\test_results.xml
\ No newline at end of file
diff --git a/.github/workflows/run-smoke-test-win-staging.yml b/.github/workflows/run-smoke-test-win-staging.yml
deleted file mode 100644
index 9f10ad455..000000000
--- a/.github/workflows/run-smoke-test-win-staging.yml
+++ /dev/null
@@ -1,95 +0,0 @@
-# run setup-dev to install packages and run smoke test
-# trigger by push or pull_request with commit message: "Run win smoke test!"
-
-name: "run-smoke-test-win-staging"
-on: [push, pull_request]
-
-jobs:
- run-smoke-test-win-staging:
- runs-on: ${{ matrix.os }}
- strategy:
- matrix:
- os: [windows-latest]
- python-version: [3.6, 3.7, 3.8]
- env:
- bamboo_user: ${{ secrets.PYPI_STAGING_USERNAME }}
- bamboo_password: ${{ secrets.PYPI_STAGING_PASSWORD }}
- if: "contains(github.event.head_commit.message, 'Run win smoke test!')"
- steps:
- - name: Check out Git repository
- uses: actions/checkout@v2
- - name: ${{ matrix.os }} Python ${{ matrix.python-version }}
- uses: actions/setup-python@v2
- with:
- python-version: ${{ matrix.python-version }}
- architecture: x64
- - name: upgrade pip
- run: |
- python -m pip install --upgrade pip
- - name: Install Python dependencies
- run: |
- pip install idm-buildtools flake8 wheel coloredlogs --index-url=https://packages.idmod.org/api/pypi/pypi-production/simple
- - name: Install dataclass
- if: ${{ matrix.python-version }} == 3.6
- run: |
- pip install dataclasses
- - name: download idmtools packages from staging jfrog
- run: |
- pip install idmtools[full] --index-url=https://${{ env.bamboo_user }}:${{ env.bamboo_password }}@packages.idmod.org/api/pypi/pypi-staging/simple --verbose
- pip install idmtools-test --index-url=https://${{ env.bamboo_user }}:${{ env.bamboo_password }}@packages.idmod.org/api/pypi/pypi-staging/simple --verbose
- - name: login to comps2
- run: |
- python dev_scripts/create_auth_token_args.py --username ${{ secrets.COMPS_USER }} --password ${{ secrets.COMPS_PASSWORD }}
- - name: login to docker staging
- run: |
- echo '${{ env.bamboo_password }}' | docker login idm-docker-staging.packages.idmod.org --username ${{ env.bamboo_user }} --password-stdin
- - name: run idmtools_cli smoke tests
- run: |
- cd idmtools_cli\tests
- py.test --durations=3 -v --junitxml=test_results.xml -m "smoke"
- - name: Upload idmtools_cli smoke test results
- uses: actions/upload-artifact@v2
- if: failure()
- with:
- name: idmtools_cli_test_results
- path: idmtools_cli\tests\test_results.xml
- - name: run idmtools_core smoke tests
- run: |
- cd idmtools_core\tests
- py.test --durations=3 -v --junitxml=test_results.xml -m "smoke"
- - name: Upload idmtools_core smoke test results
- uses: actions/upload-artifact@v2
- if: failure()
- with:
- name: idmtools_core_test_results
- path: idmtools_core\tests\test_results.xml
- - name: run idmtools_models tests
- run: |
- cd idmtools_models/tests
- py.test --durations=3 -v --junitxml=test_results.xml -m "smoke"
- - name: Upload idmtools_models smoke test results
- uses: actions/upload-artifact@v2
- if: failure()
- with:
- name: idmtools_models_test_results
- path: idmtools_models\tests\test_results.xml
- - name: run idmtools_platform_comps tests
- run: |
- cd idmtools_platform_comps\tests
- py.test --durations=3 -v --junitxml=test_results.xml -m "smoke"
- - name: Upload idmtools_platform_comps test results
- uses: actions/upload-artifact@v2
- if: failure()
- with:
- name: idmtools_platform_comps_test_results
- path: idmtools_platform_comps\tests\test_results.xml
-# - name: run idmtools_platform_local smoke tests
-# run: |
-# cd idmtools_platform_local\tests
-# py.test --durations=3 -v --junitxml=test_results.xml -m "smoke"
-# - name: Upload idmtools_platform_local smoke test results
-# uses: actions/upload-artifact@v2
-# if: failure()
-# with:
-# name: idmtools_platform_local_test_results
-# path: idmtools_platform_local\tests\test_results.xml
\ No newline at end of file
diff --git a/Makefile b/Makefile
index 3945bf427..474e5f5ec 100644
--- a/Makefile
+++ b/Makefile
@@ -11,7 +11,7 @@ CLDIR=$(PDS)clean_dir.py
COVERAGE_PATH=tests/.coverage
help:
- $(PDS)get_help_from_makefile.py
+ help-from-makefile -f $(mkfile_path)
clean: stop-allure ## Clean most common outputs(Logs, Test Results, etc)
-$(MAKEALL) --parallel clean
diff --git a/README.md b/README.md
index a1d9a6a9b..8b8f05ce1 100644
--- a/README.md
+++ b/README.md
@@ -32,7 +32,7 @@
# User Installation
-See Private documentation is located at https://docs.idmod.org/projects/idmtools/en/latest/. Scroll to "Do you have a password? Access here" and use the password "IDM2020idmtools". for complete documentation
+Documentation is located at https://docs.idmod.org/projects/idmtools/en/latest/.
## Recommended install
diff --git a/dev_scripts/get_help_from_makefile.py b/dev_scripts/get_help_from_makefile.py
deleted file mode 100755
index b57406496..000000000
--- a/dev_scripts/get_help_from_makefile.py
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/usr/bin/env python
-import re
-import argparse
-import os
-
-help_pattern = re.compile(r'(^[a-zA-Z\-]+):([a-zA-Z\- ]*)##(.*?)$')
-
-
-def parse_help_from_make(filename):
- with open(filename, 'r') as make_in:
- lines = make_in.readlines()
- help = []
- for line in lines:
- m = help_pattern.match(line)
- if m:
- help.append((m.group(1), m.group(3)))
-
- help = sorted(help, key=lambda x: x[0])
- return help
-
-
-def print_help(help_items):
- print("\n".join([f'{x[0].strip().ljust(20)}:{x[1].strip()}' for x in help_items]))
-
-
-if __name__ == '__main__':
- parser = argparse.ArgumentParser()
- parser.add_argument("-f", "--file", default=[], action="append", help="Path to makefile")
- args = parser.parse_args()
-
- if args.file:
- help_items = []
- print(args.file)
- for f in args.file:
- help_items.extend(parse_help_from_make(os.path.abspath(f)))
- else:
- help_items = parse_help_from_make(f'{os.path.join(os.getcwd(), "Makefile")}')
-
- print_help(help_items)
diff --git a/dev_scripts/package_general.mk b/dev_scripts/package_general.mk
index 51dea4657..0daa0c977 100644
--- a/dev_scripts/package_general.mk
+++ b/dev_scripts/package_general.mk
@@ -15,7 +15,7 @@ CLDIR=$(PDS)clean_dir.py
PYPI_URL?=https://packages.idmod.org/api/pypi/idm-pypi-staging/
help:
- $(PDS)get_help_from_makefile.py
+ help-from-makefile -f $(mkfile_path)
clean: ## Clean most of the temp-data from the project
$(MAKE) -C tests clean
@@ -73,6 +73,7 @@ coverage-all: ## Generate a code-coverage report using all tests
$(MAKE) -C tests $@
# Release related rules
+#######################
dist: clean ## build our package
python setup.py sdist
diff --git a/dev_scripts/package_requirements.txt b/dev_scripts/package_requirements.txt
index 7c4539aae..baf89f569 100644
--- a/dev_scripts/package_requirements.txt
+++ b/dev_scripts/package_requirements.txt
@@ -1,7 +1,7 @@
--extra-index-url https://packages.idmod.org/api/pypi/pypi-production/simple
bump2version~=1.0.1
-coverage~=5.3
-flake8~=3.8.4
-idm-buildtools~=1.0.1
-twine~=3.2.0
-wheel
\ No newline at end of file
+coverage>=5.3,<5.6
+flake8~=3.9.0
+idm-buildtools~=1.0.3
+twine~=3.4.1
+wheel
diff --git a/dev_scripts/test_root.mk b/dev_scripts/test_root.mk
index 53e36af0e..291b002cf 100644
--- a/dev_scripts/test_root.mk
+++ b/dev_scripts/test_root.mk
@@ -24,7 +24,7 @@ COVERAGE_OPTS := --cov-config=.coveragerc --cov-branch --cov-append --cov=idmtoo
help: ## This help
- $(PDS)get_help_from_makefile.py -f $(mkfile_path)
+ help-from-makefile -f $(mkfile_path)
clean:
-$(RM) -rf .pytest_cache .test_platform reports assets *.log *.log* *.buildlog __pycache__ *.html *.xml .coverage
diff --git a/docs/_templates/footer.html b/docs/_templates/footer.html
index 4d914bd33..f78f8356d 100644
--- a/docs/_templates/footer.html
+++ b/docs/_templates/footer.html
@@ -1,7 +1,7 @@
{% extends '!footer.html' %}
{% block extrafooter %}
-
idmtools is licensed under the Creative Commons
- Attribution-Noncommercial-ShareAlike 4.0 License.
+idmtools is licensed under the Creative Commons
+ Attribution-ShareAlike 4.0 International License.
Send documentation feedback to feedback@idmod.org. If you have
questions, email support@idmod.org.
Privacy and Cookies Notice |
diff --git a/docs/conf.py b/docs/conf.py
index 1630aee3e..7beedab8c 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -20,6 +20,7 @@
import os
import subprocess
import sys
+from datetime import datetime
import sphinx_rtd_theme
@@ -37,14 +38,15 @@
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
- 'sphinx.ext.imgmath',
+ 'sphinx.ext.mathjax',
'sphinx.ext.githubpages',
'sphinx.ext.autodoc',
'sphinxcontrib.napoleon',
'sphinx.ext.todo',
'plantweb.directive',
'sphinxcontrib.programoutput',
- 'sphinx_copybutton'
+ 'sphinx_copybutton',
+ 'sphinx.ext.intersphinx'
]
plantuml = 'plantweb'
@@ -56,8 +58,7 @@
}
autodoc_mock_imports = ['pika',
- 'dramatiq'
- ]
+ 'dramatiq']
napoleon_google_docstring = True
@@ -81,7 +82,7 @@
# General information about the project.
project = u'idmtools'
-copyright = u'2020, Bill & Melinda Gates Foundation. All rights reserved.'
+copyright = f'1999 - {datetime.today().year}, Bill & Melinda Gates Foundation. All rights reserved.'
author = u'Institute for Disease Modeling'
# The version info for the project you're documenting, acts as replacement for
@@ -387,4 +388,9 @@
# Example configuration for intersphinx: refer to the Python standard library.
-# intersphinx_mapping = {'https://docs.python.org/': None}
+intersphinx_mapping = {'python': ('https://docs.python.org/3', None),
+ 'emod_api': ('https://docs.idmod.org/projects/emod-api/en/latest/', None),
+ 'emodpy': ('https://docs.idmod.org/projects/emodpy/en/latest/', None),
+ 'pycomps': ('https://docs.idmod.org/projects/pycomps/en/latest/', None)
+ }
+
diff --git a/docs/index.rst b/docs/index.rst
index b9f5d78bd..290194502 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -8,7 +8,10 @@ from the creation of input files (if required), to calibration of the model to d
and running simulations, through the analysis of results. Modelers can use |IT_s| to run models
locally or send suites of simulations to an HPC or other computing source. This framework is
free, open-source, and model agnostic: it can be used to interact with a variety of models,
-such as custom models written in R or Python, or IDM's own |EMOD_s|.
+such as custom models written in R or Python, or IDM's own |EMOD_s|. Additional functionality
+for interacting with |EMOD_s| is provided in the :doc:`emod_api:emod_api_index` and
+:doc:`emodpy:emodpy_index` packages.
+
|IT_s| workflow
diff --git a/docs/platforms/comps/scheduling.rst b/docs/platforms/comps/scheduling.rst
new file mode 100644
index 000000000..e70a993af
--- /dev/null
+++ b/docs/platforms/comps/scheduling.rst
@@ -0,0 +1,178 @@
+.. _COMPS_Scheduling:
+
+Scheduling
+==========
+
+|IT_s| supports job scheduling on the |COMPS_s| platform, including support for multiple scenarios depending upon the needs of your specific research. For example, you could schedule your simulations to run under a single process on the same node with a specified number of cores. For more information about this and other supported scenarios, see `Scenarios`_. To use the full scheduling capabilities included within |COMPS_s| you must add ``workorder.json`` as a transient asset. This is a one-time task to complete for your project. For more information about scheduling configuration, see `Configuration`_. `Examples`_ are provided to help you get started and gain a better understanding. `Schemas`_ enumerate the available options that may be included in workorder.json.
+
+
+.. _Scenarios:
+
+Scenarios
+---------
+
+Choosing the correct scheduling scenario will depend upon your specific research needs and requirements. The following lists some of the common scenarios supported:
+
+* N cores, N processes - useful for single-threaded or MPI-enabled workloads, such as |EMOD_s|.
+* N cores, 1 node, 1 process - useful for models that spawn multiple worker threads (GenEpi) or that have large memory usage, where the number of cores is an indicator of memory usage.
+* 1 node, N processes - useful for models with high migration and interprocess communication. By running on the same node, MPI can use shared memory, as opposed to slower TCP sockets over multiple nodes. This may be useful for some scenarios using |EMOD_s| or other MPI-enabled workloads (a minimal workorder sketch for this scenario follows the list).
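+
+For instance, the last scenario (1 node, N processes) could be expressed with a workorder like the following minimal sketch; the node-group name is an assumption and varies by environment::
+
+    {
+        "Command": "python3 Assets/model1.py",
+        "NodeGroupName": "idm_abcd",
+        "NumProcesses": 4,
+        "NumNodes": 1,
+        "EnableMpi": true
+    }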
+
+
+.. _Configuration:
+
+Configuration
+-------------
+
+By configuring a ``workorder.json`` file and adding it as a transient asset, you can take advantage of the full scheduling support provided with |COMPS_s|. Scheduling information included in the workorder.json file takes precedence over any scheduling information in the idmtools.ini file or scheduling parameters passed to :py:class:`~idmtools.core.platform_factory.Platform`. The following examples show some of the options available to include in a workorder.json file.
+
+**Example workorder.json for HPC clusters**::
+
+ {
+ "Command": "python -c \"print('hello test')\"",
+ "NodeGroupName": "idm_abcd",
+ "NumCores": 1,
+ "SingleNode": false,
+ "Exclusive": false
+ }
+
+**Example workorder.json for SLURM clusters**::
+
+ {
+ "Command": "python3 Assets/model1.py",
+ "NodeGroupName": "idm_abcd",
+ "NumCores": 1,
+ "NumProcesses": 1,
+ "NumNodes": 1,
+ "Environment": {
+ "key1": "value1",
+ "key2:": "value2",
+ "PYTHONPATH": "$PYTHONPATH:$PWD/Assets:$PWD/Assets/site-packages",
+ "PATH": "$PATH:$PWD/Assets:$PWD/Assets/site-packages"
+ }
+ }
+
+In addition to including a workorder.json file, you must also pass the ``scheduling=True`` parameter when running simulations, for example::
+
+ experiment.run(scheduling=True)
+
+Add workorder.json as a transient asset
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+To include the workorder.json file as a transient asset you can either add an existing workorder.json using the ``add_work_order`` method or dynamically create one using the ``add_schedule_config`` method; both methods are provided by the :py:mod:`~idmtools_platform_comps.utils.scheduling` module.
+
+**Add existing workorder.json**::
+
+ add_work_order(ts, file_path=os.path.join(COMMON_INPUT_PATH, "scheduling", "slurm", "WorkOrder.json"))
+
+**Dynamically create workorder.json**::
+
+ add_schedule_config(ts, command="python -c \"print('hello test')\"", node_group_name='idm_abcd', num_cores=2,
+ NumProcesses=1, NumNodes=1,
+ Environment={"key1": "value1", "key2:": "value2"})
+
+
+.. _Examples:
+
+Examples
+--------
+
+For additional information about using a workorder.json file within Python, you can begin with the following:
+
+.. literalinclude:: ../../../examples/python_model/python_sim_scheduling_hpc.py
+ :language: python
+
+To see the list of platform aliases, such as BELEGOST and CALCULON, use the following CLI command: ``idmtools info plugins platform-aliases``.
+
+.. _Schemas:
+
+Schemas
+-------
+
+The following schemas, for both HPC and SLURM clusters on |COMPS_s|, list the available options you can include within the workorder.json file.
+
+**HPC**::
+
+ {
+ "title": "MSHPC job WorkOrder Schema",
+ "$schema": "http://json-schema.org/draft-04/schema",
+ "type": "object",
+ "required": [
+ "Command"
+ ],
+ "properties": {
+ "Command": {
+ "type": "string",
+ "minLength": 1,
+ "description": "The command to run, including binary and all arguments"
+ },
+ "NodeGroupName": {
+ "type": "string",
+ "minLength": 1,
+ "description": "The cluster node-group to commission the job to"
+ },
+ "NumCores": {
+ "type": "integer",
+ "minimum": 1,
+ "description": "The number of cores to reserve"
+ },
+ "SingleNode": {
+ "type": "boolean",
+ "description": "A flag to limit all reserved cores to being on the same compute node"
+ },
+ "Exclusive": {
+ "type": "boolean",
+ "description": "A flag that controls whether nodes should be exclusively allocated to this job"
+ }
+ },
+ "additionalProperties": false
+ }
+
+**SLURM**::
+
+ {
+ "title": "SLURM job WorkOrder Schema",
+ "$schema": "http://json-schema.org/draft-04/schema",
+ "type": "object",
+ "required": [
+ "Command"
+ ],
+ "properties": {
+ "Command": {
+ "type": "string",
+ "minLength": 1,
+ "description": "The command to run, including binary and all arguments"
+ },
+ "NodeGroupName": {
+ "type": "string",
+ "minLength": 1,
+ "description": "The cluster node-group to commission to"
+ },
+ "NumCores": {
+ "type": "integer",
+ "minimum": 1,
+ "description": "The number of cores to reserve"
+ },
+ "NumNodes": {
+ "type": "integer",
+ "minimum": 1,
+ "description": "The number of nodes to schedule"
+ },
+ "NumProcesses": {
+ "type": "integer",
+ "minimum": 1,
+ "description": "The number of processes to execute"
+ },
+ "EnableMpi": {
+ "type": "boolean",
+ "description": "A flag that controls whether to run the job with mpiexec (i.e. whether the job will use MPI)"
+ },
+ "Environment": {
+ "type": "object",
+ "description": "Environment variables to set in the job environment; these can be dynamically expanded (e.g. $PATH)",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ },
+ "additionalProperties": false
+ }
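+
+If you want to sanity-check a workorder file locally before submitting, the schemas above work with a generic JSON Schema validator. A minimal sketch using the third-party ``jsonschema`` package (an assumption; it is not an |IT_s| dependency), with the SLURM schema saved locally as ``slurm_schema.json``::
+
+    import json
+    from jsonschema import validate  # pip install jsonschema
+
+    with open("WorkOrder.json") as f:
+        work_order = json.load(f)
+    with open("slurm_schema.json") as f:
+        schema = json.load(f)
+    # raises jsonschema.ValidationError if the workorder violates the schema
+    validate(instance=work_order, schema=schema)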
diff --git a/docs/platforms/platforms-comps.rst b/docs/platforms/platforms-comps.rst
index dac4ab227..058a82afe 100644
--- a/docs/platforms/platforms-comps.rst
+++ b/docs/platforms/platforms-comps.rst
@@ -20,4 +20,5 @@ Utilities Unique to COMPS
comps/assetize_output
comps/download.rst
comps/errors.rst
- comps/singularity_build.rst
+ comps/scheduling.rst
+ comps/singularity_build.rst
\ No newline at end of file
diff --git a/docs/requirements.txt b/docs/requirements.txt
index a8b899352..0de3b8c3c 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -1,8 +1,8 @@
plantweb~=1.2.1
-pygit2~=1.4.0
-PyGithub~=1.53
+pygit2>=1.4.0,<1.6.0
+PyGithub~=1.54
sphinx-copybutton~=0.3.1
-sphinx-rtd-theme~=0.5.0
+sphinx-rtd-theme~=0.5.1
sphinxcontrib-napoleon~=0.7
sphinxcontrib-programoutput~=0.16
-sphinx~=3.3.1
\ No newline at end of file
+sphinx~=3.5.2
diff --git a/examples/python_model/command_task_sweep_scheduling.py b/examples/python_model/command_task_sweep_scheduling.py
new file mode 100644
index 000000000..3d89c0384
--- /dev/null
+++ b/examples/python_model/command_task_sweep_scheduling.py
@@ -0,0 +1,44 @@
+import os
+import sys
+from functools import partial
+from idmtools.builders import SimulationBuilder
+from idmtools.core.platform_factory import Platform
+from idmtools.entities import CommandLine
+from idmtools.entities.command_task import CommandTask
+from idmtools.entities.experiment import Experiment
+from idmtools.entities.templated_simulation import TemplatedSimulations
+from idmtools_platform_comps.utils.scheduling import default_add_workerorder_sweep_callback
+
+
+# Update each sweep parameter in simulation and add to command line argument to command
+def set_value(simulation, name, value):
+ fix_value = round(value, 2) if isinstance(value, float) else value
+ # add argument
+ simulation.task.command.add_raw_argument(str(fix_value))
+ # add tag with our value
+ simulation.tags[name] = fix_value
+
+
+# create command line
+command = CommandLine("python3 Assets/commandline_model.py")
+# create CommandTask
+task = CommandTask(command=command)
+ts = TemplatedSimulations(base_task=task)
+
+sb = SimulationBuilder()
+sb.add_sweep_definition(partial(set_value, name="pop_size"), [10000, 20000])
+sb.add_sweep_definition(partial(set_value, name="pop_infected"), [10, 100])
+sb.add_sweep_definition(partial(set_value, name="n_days"), [100, 110])
+sb.add_sweep_definition(partial(set_value, name="rand_seed"), [1234, 4567])
+sb.add_sweep_definition(partial(default_add_workerorder_sweep_callback, file_name="WorkOrder.json"),
+ os.path.join("inputs", "scheduling", "WorkOrder_orig.json"))
+
+ts.add_builder(sb)
+
+experiment = Experiment.from_template(ts, name=os.path.split(sys.argv[0])[1])
+experiment.add_asset(os.path.join("inputs", "scheduling", "commandline_model.py"))
+
+with Platform('CALCULON') as platform:
+ experiment.run(wait_on_done=True, scheduling=True)
+ # use system status as the exit code
+ sys.exit(0 if experiment.succeeded else -1)
diff --git a/examples/python_model/inputs/csv_inputs/Assets/model.py b/examples/python_model/inputs/python/Assets/model.py
similarity index 100%
rename from examples/python_model/inputs/csv_inputs/Assets/model.py
rename to examples/python_model/inputs/python/Assets/model.py
diff --git a/examples/python_model/inputs/csv_inputs/Assets/model1.py b/examples/python_model/inputs/python/Assets/model1.py
similarity index 100%
rename from examples/python_model/inputs/csv_inputs/Assets/model1.py
rename to examples/python_model/inputs/python/Assets/model1.py
diff --git a/examples/python_model/inputs/csv_inputs/MyExternalLibrary/__init__.py b/examples/python_model/inputs/python/MyExternalLibrary/__init__.py
similarity index 100%
rename from examples/python_model/inputs/csv_inputs/MyExternalLibrary/__init__.py
rename to examples/python_model/inputs/python/MyExternalLibrary/__init__.py
diff --git a/examples/python_model/inputs/csv_inputs/MyExternalLibrary/functions.py b/examples/python_model/inputs/python/MyExternalLibrary/functions.py
similarity index 100%
rename from examples/python_model/inputs/csv_inputs/MyExternalLibrary/functions.py
rename to examples/python_model/inputs/python/MyExternalLibrary/functions.py
diff --git a/examples/python_model/inputs/python_model_with_deps/WorkOrder.json b/examples/python_model/inputs/python_model_with_deps/WorkOrder.json
new file mode 100644
index 000000000..a8ceac98d
--- /dev/null
+++ b/examples/python_model/inputs/python_model_with_deps/WorkOrder.json
@@ -0,0 +1,11 @@
+{
+ "Command": "python3 Assets/model.py --config config.json",
+ "NodeGroupName": "idm_cd",
+ "NumCores": 4,
+ "NumProcesses": 1,
+ "NumNodes": 1,
+ "Environment": {
+ "key1": "value1",
+ "key2:": "value2"
+ }
+}
\ No newline at end of file
diff --git a/examples/python_model/inputs/scheduling/WorkOrder.json b/examples/python_model/inputs/scheduling/WorkOrder.json
new file mode 100644
index 000000000..6929787e4
--- /dev/null
+++ b/examples/python_model/inputs/scheduling/WorkOrder.json
@@ -0,0 +1,13 @@
+{
+ "Command": "python3 Assets/model1.py",
+ "NodeGroupName": "idm_abcd",
+ "NumCores": 1,
+ "NumProcesses": 1,
+ "NumNodes": 1,
+ "Environment": {
+ "key1": "value1",
+ "key2:": "value2",
+ "PYTHONPATH": "$PYTHONPATH:$PWD/Assets:$PWD/Assets/site-packages",
+ "PATH": "$PATH:$PWD/Assets:$PWD/Assets/site-packages"
+ }
+}
\ No newline at end of file
diff --git a/examples/python_model/inputs/scheduling/WorkOrder_orig.json b/examples/python_model/inputs/scheduling/WorkOrder_orig.json
new file mode 100644
index 000000000..ba7952625
--- /dev/null
+++ b/examples/python_model/inputs/scheduling/WorkOrder_orig.json
@@ -0,0 +1,12 @@
+{
+ "Command": "python3 Assets/commandline_model.py {pop_size} {pop_infected} {n_days} {rand_seed}",
+ "NodeGroupName": "idm_abcd",
+ "NumCores": 1,
+ "NumProcesses": 1,
+ "NumNodes": 1,
+ "EnableMpi": false,
+ "Environment": {
+ "key1": "value1",
+ "key2:": "value2"
+ }
+}
\ No newline at end of file
diff --git a/examples/python_model/inputs/scheduling/commandline_model.py b/examples/python_model/inputs/scheduling/commandline_model.py
new file mode 100644
index 000000000..dfe889aa9
--- /dev/null
+++ b/examples/python_model/inputs/scheduling/commandline_model.py
@@ -0,0 +1,22 @@
+import sys
+import time
+
+
+def test_sweep(pop_size=10000, pop_infected=10, n_days=120, rand_seed=1, pop_type='hybrid'):
+ pars = {
+ "pop_size": pop_size, # Population size
+ "pop_infected": pop_infected, # Number of initial infections
+ "n_days": n_days, # Number of days to simulate
+ "rand_seed": rand_seed, # Random seed
+ "pop_type": pop_type, # Population to use -- "hybrid" is random with household, school,and work structure
+ }
+ print(str(pars))
+
+
+if __name__ == "__main__":
+    # command-line arguments arrive as strings; convert them to ints
+    pop_size = int(sys.argv[1])
+    pop_infected = int(sys.argv[2])
+    n_days = int(sys.argv[3])
+    rand_seed = int(sys.argv[4])
+
+ test_sweep(pop_size=pop_size, pop_infected=pop_infected, n_days=n_days, rand_seed=rand_seed, pop_type='hybrid')
\ No newline at end of file
diff --git a/examples/python_model/inputs/scheduling/hpc/WorkOrder.json b/examples/python_model/inputs/scheduling/hpc/WorkOrder.json
new file mode 100644
index 000000000..5c3b4875c
--- /dev/null
+++ b/examples/python_model/inputs/scheduling/hpc/WorkOrder.json
@@ -0,0 +1,7 @@
+{
+ "Command": "python -c \"print('hello test')\"",
+ "NodeGroupName": "emod_abcd",
+ "NumCores": 1,
+ "SingleNode": false,
+ "Exclusive": false
+}
\ No newline at end of file
diff --git a/examples/python_model/python_csv_output.py b/examples/python_model/python_csv_output.py
index 4402029b6..b7d9e1bc5 100644
--- a/examples/python_model/python_csv_output.py
+++ b/examples/python_model/python_csv_output.py
@@ -22,11 +22,11 @@
with platform('BELEGOST'):
# define our base task as a python model with json config
base_task = JSONConfiguredPythonTask(
- script_path=os.path.join("inputs", "csv_inputs", "Assets", "model.py"),
+ script_path=os.path.join("inputs", "python", "Assets", "model.py"),
# set the default parameters to 0
parameters=(dict(c=0)),
# add some experiment level assets
- common_assets=AssetCollection.from_directory(os.path.join("inputs", "csv_inputs", "Assets"))
+ common_assets=AssetCollection.from_directory(os.path.join("inputs", "python", "Assets"))
)
# create a templating object using the base task
diff --git a/examples/python_model/python_sim_scheduling.py b/examples/python_model/python_sim_scheduling.py
new file mode 100644
index 000000000..7b1a4dc54
--- /dev/null
+++ b/examples/python_model/python_sim_scheduling.py
@@ -0,0 +1,71 @@
+"""
+In this example, we will demonstrate how to use WorkOrder.json to override simulation command with comp's scheduling
+feature. also show how to use WorkOrder's Environment field to set PYTHONPATH for model.
+ in COMPS, file layout is:
+ Assets-
+ |_MyExternalLibarary
+ |_function.py
+ |_model1.py
+ |_site-packages
+ |_numpy
+ in order for model1.py to call MyExternalLibarary.function which uses numpy package, MyExternalLibarary.function
+ and numpy must be in PYTHONPATH
+ So we add "PYTHONPATH": "$PYTHONPATH:$PWD/Assets:$PWD/Assets/site-packages" in WorkOrder.json
+ the command also define in WorkOrder.json as: "Command": "python3 Assets/model1.py". you can define other fields
+ in WorkOrder, like NumCores, NumProcesses etc.
+"""
+
+
+import os
+import sys
+
+from idmtools.assets import AssetCollection
+from idmtools.builders import SimulationBuilder
+from idmtools.core.platform_factory import Platform
+from idmtools.entities.experiment import Experiment
+from idmtools.entities.templated_simulation import TemplatedSimulations
+from idmtools_models.python.json_python_task import JSONConfiguredPythonTask
+from idmtools_platform_comps.utils.scheduling import add_work_order
+from idmtools_platform_comps.utils.python_requirements_ac.requirements_to_asset_collection import \
+ RequirementsToAssetCollection
+
+with Platform('CALCULON') as platform:
+ # install numpy package to cluster
+ pl = RequirementsToAssetCollection(platform, pkg_list=['numpy==1.19.5'])
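+    # run() builds (or reuses, keyed by a checksum of the requirements) an asset collection on COMPS and returns its id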
+ ac_id = pl.run()
+ # add numpy to common_assets
+ common_assets1 = AssetCollection.from_id(ac_id, as_copy=True)
+ # add input folder to common_assets
+ common_assets2 = AssetCollection.from_directory(os.path.join("inputs", "python", "MyExternalLibrary"), relative_path="MyExternalLibrary")
+ # add both together
+ common_assets = common_assets1 + common_assets2
+
+    # create a JSON-configured task which generates config.json; add the model script and common assets to the COMPS experiment
+ task = JSONConfiguredPythonTask(
+ script_path=os.path.join("inputs", "python", "Assets", "model1.py"),
+ # set default parameters
+ parameters=dict(c=0),
+ # set a parameter envelope
+ envelope="parameters",
+ # add some experiment level assets
+ common_assets=common_assets
+ )
+
+    # create a TemplatedSimulations object from the base task
+ ts = TemplatedSimulations(base_task=task)
+
+ # add WorkOrder.json to each simulation as transient_assets
+ add_work_order(ts, file_path=os.path.join("inputs", "scheduling", "WorkOrder.json"))
+
+    # create builder and define our sweeps
+ builder = SimulationBuilder()
+ builder.add_sweep_definition(JSONConfiguredPythonTask.set_parameter_partial("a"), range(3))
+ builder.add_sweep_definition(JSONConfiguredPythonTask.set_parameter_partial("b"), [1, 2])
+    # add builder to the TemplatedSimulations
+ ts.add_builder(builder)
+ # create experiment
+ e = Experiment.from_template(ts, name=os.path.split(sys.argv[0])[1], tags=dict(tag1=1))
+ # run experiment with scheduling
+ e.run(wait_until_done=True, scheduling=True)
+ # use system status as the exit code
+ sys.exit(0 if e.succeeded else -1)
diff --git a/examples/python_model/python_sim_scheduling_hpc.py b/examples/python_model/python_sim_scheduling_hpc.py
new file mode 100644
index 000000000..82fcdbb99
--- /dev/null
+++ b/examples/python_model/python_sim_scheduling_hpc.py
@@ -0,0 +1,85 @@
+# In this example, we demonstrate how to use WorkOrder.json to create simulations on an MSHPC cluster.
+# When WorkOrder.json is used correctly, simulations are created based on the Command in WorkOrder.json; all commands
+# from the task are ignored.
+
+import os
+import sys
+from functools import partial
+from typing import Any, Dict
+
+from idmtools.builders import SimulationBuilder
+from idmtools.core.platform_factory import Platform
+from idmtools.entities.experiment import Experiment
+from idmtools.entities.simulation import Simulation
+from idmtools.entities.templated_simulation import TemplatedSimulations
+from idmtools_models.python.json_python_task import JSONConfiguredPythonTask
+from idmtools_platform_comps.utils.scheduling import add_work_order
+
+# first define our base task. please see the detailed explanation in examples/python_models/python_sim.py
+# if we do not use WorkOrder.json, this task would create a simulation command of "python Assets/model.py" in COMPS
+# but for this example, we will use WorkOrder.json to override this command, so here the task's script can be anything
+task = JSONConfiguredPythonTask(script_path=os.path.join("inputs", "python_model_with_deps", "Assets", "model.py"),
+ parameters=(dict(c=0)))
+
+# now let's use this task to create a TemplatedSimulation builder. This will build new simulations from sweep builders
+# we will define later. We can also use it to manipulate the base_task or the base_simulation
+ts = TemplatedSimulations(base_task=task)
+
+# We can define common metadata like tags across all the simulations using the base_simulation object
+ts.base_simulation.tags['tag1'] = 1
+
+# load the WorkOrder.json file from the local machine into each simulation via the task; the actual command run in COMPS is contained in this file
+add_work_order(ts, file_path=os.path.join("inputs", "scheduling", "hpc", "WorkOrder.json"))
+
+# Since we have our templated simulation object now, let's define our sweeps
+# To do that we need to use a builder
+builder = SimulationBuilder()
+
+
+# define a utility function that will update a single parameter at a
+# time on the model and add that param/value pair as a tag on our simulation.
+def param_update(simulation: Simulation, param: str, value: Any) -> Dict[str, Any]:
+ """
+    This function is called during sweeping, allowing us to pass the generated sweep values to our task configuration.
+
+    We always receive a Simulation object. We know that simulations all have tasks, and that for our particular set
+    of simulations they will all include a JSONConfiguredPythonTask. We configure the model with calls to set_parameter
+    to update the config. In addition, we can return a dictionary of tags to add to the simulation, so we return
+    the output of the 'set_parameter' call since it returns the param/value pair we set.
+
+ Args:
+ simulation: Simulation we are configuring
+ param: Param string passed to use
+ value: Value to set param to
+
+ Returns:
+        The dictionary containing the param/value pair that was set (used as simulation tags)
+ """
+ return simulation.task.set_parameter(param, value)
+
+
+# now add the sweep to our builder
+builder.add_sweep_definition(partial(param_update, param="a"), range(3))
+builder.add_sweep_definition(partial(param_update, param="b"), [1, 2, 3])
+ts.add_builder(builder)
+
+# Now we can create our Experiment using our template builder
+experiment = Experiment.from_template(ts, name=os.path.split(sys.argv[0])[1])
+# Add our own custom tag to simulation
+experiment.tags["tag1"] = 1
+# And maybe some custom Experiment Level Assets
+experiment.assets.add_directory(assets_directory=os.path.join("inputs", "python_model_with_deps", "Assets"))
+
+with Platform('BELEGOST') as platform:
+    # Call run() with 'scheduling=True' to run simulations with scheduling using WorkOrder.json (loaded above)
+    # There are a few ways to schedule computation resources in COMPS:
+    # 1. the add_work_order() method, to add a WorkOrder.json file to simulations as a transient asset
+    # 2. the add_schedule_config() method, to add a dynamic WorkOrder.json to simulations as a transient asset
+    # 3. additional parameters passed to Platform creation via Platform(**kwargs)
+    # 4. idmtools.ini
+    # the order of precedence is WorkOrder.json > Platform() > idmtools.ini
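+    #    (e.g. Platform('BELEGOST', num_cores=2) -- assuming the platform exposes such scheduling options)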
+    # with the experiment.run method, you can also pass in other options like priority='Highest' to override any
+    # priority value either set in idmtools.ini or defined in Platform(**kwargs)
+ experiment.run(True, scheduling=True, priority='Highest')
+ # use system status as the exit code
+ sys.exit(0 if experiment.succeeded else -1)
diff --git a/examples/python_model/python_with_envelope_csv_output.py b/examples/python_model/python_with_envelope_csv_output.py
deleted file mode 100644
index 184094947..000000000
--- a/examples/python_model/python_with_envelope_csv_output.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# Example Python Experiment
-# In this example, we will demonstrate how to run a python experiment.
-
-# First, import some necessary system and idmtools packages.
-# - SimulationBuilder: To create sweeps
-# - TemplatedSimulations: To create simulations from our templated task and builder
-# - Platform: To specify the platform you want to run your experiment on
-# - JSONConfiguredPythonTask: We want to run an experiment executing a Python script with a json config
-import os
-import sys
-from idmtools.assets import AssetCollection
-from idmtools.builders import SimulationBuilder
-from idmtools.core.platform_factory import platform
-from idmtools.entities.experiment import Experiment
-from idmtools.entities.templated_simulation import TemplatedSimulations
-from idmtools_models.python.json_python_task import JSONConfiguredPythonTask
-
-# In order to run the experiment, we need to create a `Platform` and an `ExperimentManager`.
-# The `Platform` defines where we want to run our simulation.
-# You can easily switch platforms by changing the Platform to for example 'Local'
-with platform('BELEGOST'):
- base_task = JSONConfiguredPythonTask(
- script_path=os.path.join("inputs", "csv_inputs", "Assets", "model1.py"),
- # set default parameters
- parameters=dict(c=0),
- # set a parameter envelope
- envelope="parameters",
- # add some experiment level assets
- common_assets=AssetCollection.from_directory(os.path.join("inputs", "csv_inputs"))
- )
-
- ts = TemplatedSimulations(base_task=base_task)
-
- # define our sweeps
- builder = SimulationBuilder()
- builder.add_sweep_definition(JSONConfiguredPythonTask.set_parameter_partial("a"), range(3))
- builder.add_sweep_definition(JSONConfiguredPythonTask.set_parameter_partial("b"), [1, 2, 3])
-
- ts.add_builder(builder)
-
- e = Experiment.from_template(ts, name=os.path.split(sys.argv[0])[1], tags=dict(tag1=1))
-
- e.run(wait_until_done=True)
- # use system status as the exit code
- sys.exit(0 if e.succeeded else -1)
diff --git a/examples/requirements.txt b/examples/requirements.txt
index d2380a6be..5e823f984 100644
--- a/examples/requirements.txt
+++ b/examples/requirements.txt
@@ -1,4 +1,4 @@
-docker==4.3.1
+docker>=4.3.1,<4.5.0
pipreqs==0.4.10
dramatiq[redis]
-pyCOMPS
\ No newline at end of file
+pyCOMPS
diff --git a/examples/singularity/covasim/WorkOrder_orig.json b/examples/singularity/covasim/WorkOrder_orig.json
new file mode 100644
index 000000000..0dc7e47e3
--- /dev/null
+++ b/examples/singularity/covasim/WorkOrder_orig.json
@@ -0,0 +1,11 @@
+{
+ "Command": "singularity exec ./Assets/covasim_ubuntu.sif python3 Assets/run_sim_sweep.py {pop_size} {pop_infected} {n_days} {rand_seed}",
+ "NodeGroupName": "idm_cd",
+ "NumCores": 1,
+ "NumProcesses": 1,
+ "NumNodes": 1,
+ "Environment": {
+ "key1": "value1",
+ "key2:": "value2"
+ }
+}
\ No newline at end of file
diff --git a/examples/singularity/covasim/run_covasim_sweep.py b/examples/singularity/covasim/run_covasim_sweep.py
index 6754267e0..10f6e63c9 100644
--- a/examples/singularity/covasim/run_covasim_sweep.py
+++ b/examples/singularity/covasim/run_covasim_sweep.py
@@ -1,55 +1,42 @@
import os
import sys
from functools import partial
-
from idmtools.assets import AssetCollection
from idmtools.builders import SimulationBuilder
from idmtools.core.platform_factory import Platform
from idmtools.entities import CommandLine
from idmtools.entities.command_task import CommandTask
from idmtools.entities.experiment import Experiment
-from idmtools.entities.iplatform import IPlatform
-from idmtools.entities.simulation import Simulation
-
-command_format_str = "singularity exec ./Assets/covasim_ubuntu.sif python3 Assets/run_sim_sweep.py {pop_size} {pop_infected} {n_days} {rand_seed}"
-
-def create_config_before_provisioning(simulation: Simulation, platform: IPlatform):
- # set the command dynamically
- simulation.task.command = CommandLine.from_string(command_format_str.format(**simulation.task.config))
-
+from idmtools.entities.templated_simulation import TemplatedSimulations
def set_value(simulation, name, value):
- simulation.task.config[name] = round(value, 2) if isinstance(value, float) else value
+ fix_value = round(value, 2) if isinstance(value, float) else value
+ # add argument
+ simulation.task.command.add_raw_argument(str(fix_value))
# add tag with our value
- simulation.tags[name] = round(value, 2) if isinstance(value, float) else value
+ simulation.tags[name] = fix_value
if __name__ == "__main__":
here = os.path.dirname(__file__)
-
# Create a platform to run the workitem
platform = Platform("CALCULON")
-
# create commandline input for the task
- command = CommandLine("singularity exec ./Assets/covasim_ubuntu.sif python3 Assets/run_sim_sweep.py")
+ command = CommandLine(f"singularity exec ./Assets/covasim_ubuntu.sif python3 Assets/run_sim_sweep.py")
task = CommandTask(command=command)
-
- task.config = dict(pop_size=1000, pop_infected=10, n_days=120, rand_seed=1)
- task.add_pre_creation_hook(create_config_before_provisioning)
-
+ ts = TemplatedSimulations(base_task=task)
# Add our image
task.common_assets.add_assets(AssetCollection.from_id_file("covasim.id"))
-
sb = SimulationBuilder()
    # Add sweeps on 4 parameters. Total of 36 simulations (2x3x3x2)
sb.add_sweep_definition(partial(set_value, name="pop_size"), [10000, 20000])
sb.add_sweep_definition(partial(set_value, name="pop_infected"), [10, 100, 1000])
sb.add_sweep_definition(partial(set_value, name="n_days"), [100, 110, 120])
sb.add_sweep_definition(partial(set_value, name="rand_seed"), [1234, 4567])
+ ts.add_builder(sb)
-
- experiment = Experiment.from_builder(sb, base_task=task, name=os.path.split(sys.argv[0])[1])
+ experiment = Experiment.from_template(ts, name=os.path.split(sys.argv[0])[1])
experiment.add_asset(os.path.join("inputs", "run_sim_sweep.py"))
experiment.add_asset(os.path.join("inputs", "sim_to_inset.py"))
experiment.run(wait_until_done=True)
if experiment.succeeded:
- experiment.to_id_file("run_sim_sweep.id")
+ experiment.to_id_file("run_sim_sweep.id")
\ No newline at end of file
diff --git a/examples/singularity/covasim/run_covasim_sweep_scheduling.py b/examples/singularity/covasim/run_covasim_sweep_scheduling.py
new file mode 100644
index 000000000..5cdbba54c
--- /dev/null
+++ b/examples/singularity/covasim/run_covasim_sweep_scheduling.py
@@ -0,0 +1,48 @@
+import os
+import sys
+from functools import partial
+from idmtools.assets import AssetCollection
+from idmtools.builders import SimulationBuilder
+from idmtools.core.platform_factory import Platform
+from idmtools.entities import CommandLine
+from idmtools.entities.command_task import CommandTask
+from idmtools.entities.experiment import Experiment
+from idmtools.entities.templated_simulation import TemplatedSimulations
+from idmtools_platform_comps.utils.scheduling import default_add_workerorder_sweep_callback
+
+
+def set_value(simulation, name, value):
+ fix_value = round(value, 2) if isinstance(value, float) else value
+ # add argument
+ simulation.task.command.add_raw_argument(str(fix_value))
+ # add tag with our value
+ simulation.tags[name] = fix_value
+
+
+if __name__ == "__main__":
+ here = os.path.dirname(__file__)
+ # Create a platform to run the workitem
+ platform = Platform("CALCULON")
+ # create commandline input for the task
+ command = CommandLine(f"singularity exec ./Assets/covasim_ubuntu.sif python3 Assets/run_sim_sweep.py")
+ task = CommandTask(command=command)
+
+ ts = TemplatedSimulations(base_task=task)
+ # Add our image
+ task.common_assets.add_assets(AssetCollection.from_id_file("covasim.id"))
+ sb = SimulationBuilder()
+    # Add sweeps on 4 parameters. Total of 36 simulations (2x3x3x2)
+ sb.add_sweep_definition(partial(set_value, name="pop_size"), [10000, 20000])
+ sb.add_sweep_definition(partial(set_value, name="pop_infected"), [10, 100, 1000])
+ sb.add_sweep_definition(partial(set_value, name="n_days"), [100, 110, 120])
+ sb.add_sweep_definition(partial(set_value, name="rand_seed"), [1234, 4567])
+ # add file to each simulation
+ sb.add_sweep_definition(partial(default_add_workerorder_sweep_callback, file_name="WorkOrder.json"),
+ "./WorkOrder_orig.json")
+ ts.add_builder(sb)
+ experiment = Experiment.from_template(ts, name=os.path.split(sys.argv[0])[1])
+ experiment.add_asset(os.path.join("inputs", "run_sim_sweep.py"))
+ experiment.add_asset(os.path.join("inputs", "sim_to_inset.py"))
+ experiment.run(wait_until_done=True, scheduling=True)
+ if experiment.succeeded:
+ experiment.to_id_file("run_sim_sweep_scheduling.id")
diff --git a/idmtools_cli/.bumpversion.cfg b/idmtools_cli/.bumpversion.cfg
index c5ac325e7..d9388cb03 100644
--- a/idmtools_cli/.bumpversion.cfg
+++ b/idmtools_cli/.bumpversion.cfg
@@ -1,5 +1,5 @@
[bumpversion]
-current_version = 1.6.2
+current_version = 1.6.3
commit = False
tag = False
parse = (?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)((?P<release>[\+a-z]+)\.(?P<build>\d+))?
diff --git a/idmtools_cli/idmtools_cli/__init__.py b/idmtools_cli/idmtools_cli/__init__.py
index 21feb4ae3..03bd259f8 100644
--- a/idmtools_cli/idmtools_cli/__init__.py
+++ b/idmtools_cli/idmtools_cli/__init__.py
@@ -1 +1 @@
-__version__ = "1.6.2.0"
+__version__ = "1.6.3.0"
diff --git a/idmtools_cli/idmtools_cli/cli/gitrepo.py b/idmtools_cli/idmtools_cli/cli/gitrepo.py
index 022bb7605..1b8b701a2 100644
--- a/idmtools_cli/idmtools_cli/cli/gitrepo.py
+++ b/idmtools_cli/idmtools_cli/cli/gitrepo.py
@@ -13,7 +13,7 @@
user_logger = getLogger('user')
-@cli.group()
+@cli.group(short_help="Contains commands related to examples download")
def gitrepo():
pass
@@ -137,14 +137,15 @@ def peep(url: Optional[str], raw: Optional[bool]):
@gitrepo.command()
-@click.option('--type', default=None, multiple=True, help="Download examples by type(COMPSPlatform, PythonTask, etc)")
+@click.option('--type', default=None, multiple=True, help="Download examples by type (COMPSPlatform, PythonTask, etc)")
@click.option('--url', default=None, multiple=True, help="Repo files url")
@click.option('--output', default='./', help="Files download destination")
-def download(type: Optional[List[str]], url: Optional[str], output: Optional[str]):
+def download(type: Optional[str], url: Optional[str], output: Optional[str]):
"""
\b
Download files from GitHub repo to user location
Args:
+ type: Object type (COMPSPlatform, PythonTask, etc)
url: GitHub repo files url
output: Local folder
@@ -154,14 +155,15 @@ def download(type: Optional[List[str]], url: Optional[str], output: Optional[str
@examples.command(name='download')
-@click.option('--type', default=None, multiple=True, help="Download examples by type(COMPSPlatform, PythonTask, etc)")
+@click.option('--type', default=None, multiple=True, help="Download examples by type (COMPSPlatform, PythonTask, etc)")
@click.option('--url', default=None, multiple=True, help="Repo files url")
@click.option('--output', default='./', help="Files download destination")
-def download_alias(type: Optional[List[str]], url: Optional[List[str]], output: Optional[str]):
+def download_alias(type: Optional[str], url: Optional[List[str]], output: Optional[str]):
"""
\b
Download examples from specified location
Args:
+ type: Object type (COMPSPlatform, PythonTask, etc)
url: GitHub repo files url
output: Local folder
diff --git a/idmtools_cli/idmtools_cli/cli/package.py b/idmtools_cli/idmtools_cli/cli/package.py
new file mode 100644
index 000000000..4c0c662b1
--- /dev/null
+++ b/idmtools_cli/idmtools_cli/cli/package.py
@@ -0,0 +1,102 @@
+import os
+from logging import getLogger
+import click
+from typing import Optional, List
+from idmtools_cli.cli.entrypoint import cli
+
+user_logger = getLogger('user')
+
+
+@cli.group(short_help="Contains commands related to package versions")
+def package():
+ pass
+
+
+@package.command()
+@click.option('--name', required=True, type=str, help="package name")
+def latest_version(name: Optional[str]):
+ """
+ \b
+ Display the latest version of a package
+ Args:
+ name: package name
+ """
+ from idmtools_platform_comps.utils.package_version import get_latest_version
+ v = get_latest_version(name)
+ print(v)
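+    # Hypothetical CLI usage (click converts underscores in command names to dashes):
+    #   idmtools package latest-version --name idmtools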
+
+
+@package.command()
+@click.option('--name', required=True, type=str, help="package name")
+@click.option('--base_version', required=True, default=None, type=str, help="package version")
+def compatible_version(name: Optional[str], base_version: Optional[str]):
+ """
+ \b
+ Display the latest compatible version of a package
+ Args:
+ name: package name
+ base_version: package version
+ """
+ from idmtools_platform_comps.utils.package_version import get_latest_compatible_version
+ v = get_latest_compatible_version(name, base_version)
+ print(v)
+
+
+@package.command()
+@click.option('--name', required=True, type=str, help="package name")
+@click.option('--all/--no-all', type=bool, default=False, help="return all versions or only released versions")
+def list_versions(name: Optional[str], all: Optional[bool]):
+ """
+ \b
+ Display all package versions
+ Args:
+ name: package name
+ all: True/False - return all or only released versions
+ """
+ from idmtools_platform_comps.utils.package_version import fetch_package_versions
+ versions = fetch_package_versions(name, not all)
+ print(versions)
+
+
+@package.command()
+@click.argument('requirement', type=click.Path(exists=True), required=False)
+@click.option('--pkg', multiple=True, help="Package for override. Format: 'key==value'")
+@click.option('--wheel', multiple=True, help="Local wheel file")
+def updated_requirements(requirement: str = None, pkg: Optional[List[str]] = None, wheel: Optional[List[str]] = None):
+ """
+ \b
+    Build an updated requirements file (requirements_updated.txt) from a requirements file
+    Args:
+        requirement: path to the requirements file
+        pkg: package name (along with version)
+        wheel: package wheel file
+ """
+ from idmtools_platform_comps.utils.python_requirements_ac.requirements_to_asset_collection import \
+ RequirementsToAssetCollection
+
+ pkg_list = list(pkg)
+ wheel_list = [os.path.abspath(w) for w in wheel]
+ pl = RequirementsToAssetCollection(None, requirements_path=requirement, pkg_list=pkg_list, local_wheels=wheel_list)
+ pl.save_updated_requirements()
+    with open('requirements_updated.txt') as req_file:
+        print(req_file.read())
+
+
+@package.command()
+@click.argument('requirement', type=click.Path(exists=True), required=False)
+@click.option('--pkg', multiple=True, help="Package for override. Format: 'key==value'")
+@click.option('--wheel', multiple=True, help="Local wheel file")
+def checksum(requirement: str = None, pkg: Optional[List[str]] = None, wheel: Optional[List[str]] = None):
+ """
+ \b
+    Construct a checksum from a requirements file
+    Args:
+        requirement: path to the requirements file
+        pkg: package name (along with version)
+        wheel: package wheel file
+ """
+ from idmtools_platform_comps.utils.python_requirements_ac.requirements_to_asset_collection import \
+ RequirementsToAssetCollection
+
+ pkg_list = list(pkg)
+ wheel_list = [os.path.abspath(w) for w in wheel]
+ pl = RequirementsToAssetCollection(None, requirements_path=requirement, pkg_list=pkg_list, local_wheels=wheel_list)
+ print(pl.checksum)
diff --git a/idmtools_cli/idmtools_cli/main.py b/idmtools_cli/idmtools_cli/main.py
index ae00cd46a..b645c24db 100644
--- a/idmtools_cli/idmtools_cli/main.py
+++ b/idmtools_cli/idmtools_cli/main.py
@@ -14,7 +14,7 @@ def main() -> NoReturn:
os.environ['IDMTOOLS_NO_CONFIG_WARNING'] = '1'
from idmtools_cli.cli.entrypoint import cli
start()
- cli()
+ cli(auto_envvar_prefix='IDMTOOLS_CLI')
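+    # auto_envvar_prefix lets CLI options also be supplied via environment variables
+    # named IDMTOOLS_CLI_<COMMAND>_<OPTION> (standard click behavior)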
def start() -> NoReturn:
@@ -39,6 +39,7 @@ def start() -> NoReturn:
import idmtools_cli.cli.config_file # noqa: F401
import idmtools_cli.cli.system_info # noqa: F401
import idmtools_cli.cli.gitrepo # noqa: F401
+ import idmtools_cli.cli.package # noqa: F401
platform_plugins = PlatformCLIPlugins()
from idmtools_cli.cli.init import build_project_commands
build_project_commands()
diff --git a/idmtools_cli/requirements.txt b/idmtools_cli/requirements.txt
index 6c947eab5..7bafe1bc5 100644
--- a/idmtools_cli/requirements.txt
+++ b/idmtools_cli/requirements.txt
@@ -1,9 +1,9 @@
click~=7.1.2
+click-plugins
colorama~=0.4.4
cookiecutter~=1.7.2
-idmtools~=1.6.2
-tabulate~=0.8.7
+idmtools~=1.6.3
+tabulate>=0.8.9,<0.9
pyperclip~=1.8
tqdm>=4.52.0,<5
-yaspin~=1.2.0
-click-plugins
\ No newline at end of file
+yaspin>=1.2.0,<1.5.0
diff --git a/idmtools_cli/setup.py b/idmtools_cli/setup.py
index ab97d1e3a..078f09941 100644
--- a/idmtools_cli/setup.py
+++ b/idmtools_cli/setup.py
@@ -50,5 +50,5 @@
entry_points={"console_scripts": ["idmtools=idmtools_cli.main:main"]},
extras_require=extras,
url='https://github.com/InstituteforDiseaseModeling/idmtools',
- version='1.6.2'
+ version='1.6.3'
)
diff --git a/idmtools_core/.bumpversion.cfg b/idmtools_core/.bumpversion.cfg
index fc0dc3455..597afd7f7 100644
--- a/idmtools_core/.bumpversion.cfg
+++ b/idmtools_core/.bumpversion.cfg
@@ -1,5 +1,5 @@
[bumpversion]
-current_version = 1.6.2
+current_version = 1.6.3
commit = False
tag = False
parse = (?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)((?P<release>[\+a-z]+)\.(?P<build>\d+))?
diff --git a/idmtools_core/Makefile b/idmtools_core/Makefile
index 8953b744e..ee774ac68 100644
--- a/idmtools_core/Makefile
+++ b/idmtools_core/Makefile
@@ -5,4 +5,4 @@ help:
$(PDS)get_help_from_makefile.py -f ../dev_scripts/package_general.mk -f ./Makefile
update-dependent-libraries: ## update any libraries in repo that depend on core
- $(PDS)update_core_version.py
\ No newline at end of file
+ $(PDS)update_core_version.py
diff --git a/idmtools_core/build_requirements.txt b/idmtools_core/build_requirements.txt
index 398a16136..9ea66b42a 100644
--- a/idmtools_core/build_requirements.txt
+++ b/idmtools_core/build_requirements.txt
@@ -1,5 +1,6 @@
bump2version~=1.0.1
-coverage~=5.3
-flake8~=3.8.4
-idm-buildtools~=1.0.1
-twine~=3.2.0
\ No newline at end of file
+coverage>=5.3,<5.6
+flake8~=3.9.0
+idm-buildtools~=1.0.3
+twine~=3.4.1
+
diff --git a/idmtools_core/dev_requirements.txt b/idmtools_core/dev_requirements.txt
index 539a9c596..61e280a30 100644
--- a/idmtools_core/dev_requirements.txt
+++ b/idmtools_core/dev_requirements.txt
@@ -1,11 +1,11 @@
-allure-pytest~=2.8.20
-junitparser~=1.6.2
+allure-pytest>=2.8.34,<2.9
+junitparser~=2.0.0
livereload~=2.6.3
pytest-cache~=1.0
-pytest-cov~=2.10.1
-pytest-html~=3.0.0
-pytest-runner~=5.2
+pytest-cov>=2.11.1
+pytest-html~=3.1.1
+pytest-runner~=5.3
pytest-timeout~=1.4.2
-pytest-xdist~=2.1
-pytest~=6.1.2
-xmlrunner~=1.7.7
\ No newline at end of file
+pytest-xdist~=2.2
+pytest~=6.2.2
+xmlrunner~=1.7.7
diff --git a/idmtools_core/idmtools/__init__.py b/idmtools_core/idmtools/__init__.py
index 6ff2ce537..34199cb75 100644
--- a/idmtools_core/idmtools/__init__.py
+++ b/idmtools_core/idmtools/__init__.py
@@ -1,7 +1,7 @@
import sys
from idmtools.core.exceptions import idmtools_error_handler
from idmtools.config.idm_config_parser import IdmConfigParser
-__version__ = "1.6.2.0"
+__version__ = "1.6.3.0"
# only set exception hook if it has not been overridden
if sys.excepthook == sys.__excepthook__:
diff --git a/idmtools_core/idmtools/assets/asset_collection.py b/idmtools_core/idmtools/assets/asset_collection.py
index f43c89f34..643671ddb 100644
--- a/idmtools_core/idmtools/assets/asset_collection.py
+++ b/idmtools_core/idmtools/assets/asset_collection.py
@@ -16,6 +16,8 @@
from idmtools.utils.filters.asset_filters import default_asset_file_filter
from idmtools.utils.info import get_doc_base_url
+IGNORE_DIRECTORIES = ['.git', '.svn', '.venv', '.idea', '.Rproj.user', '$RECYCLE.BIN', '__pycache__']
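+# directories skipped by default when scanning for assets; pass no_ignore=True to include them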
+
if TYPE_CHECKING: # pragma: no cover
from idmtools.entities.iplatform import IPlatform
@@ -94,7 +96,7 @@ def from_directory(cls, assets_directory: str, recursive: bool = True, flatten:
def assets_from_directory(assets_directory: Union[str, PathLike], recursive: bool = True, flatten: bool = False,
filters: 'TAssetFilterList' = None, # noqa: F821
filters_mode: FilterMode = FilterMode.OR,
- forced_relative_path: str = None) -> List[Asset]:
+ forced_relative_path: str = None, no_ignore: bool = False) -> List[Asset]:
"""
Create assets for files in a given directory.
@@ -107,6 +109,7 @@ def assets_from_directory(assets_directory: Union[str, PathLike], recursive: boo
the collection; False filters it out. See :meth:`~idmtools.utils.filters.asset_filters`.
filters_mode: When given multiple filters, either OR or AND the results.
forced_relative_path: Prefix a relative path to the path created from the root directory.
+            no_ignore: When True, do not ignore common directories (.git, .svn, etc.). The full list is defined in IGNORE_DIRECTORIES.
Examples:
For **relative_path**, given the following folder structure root/a/1.txt root/b.txt and
@@ -121,7 +124,7 @@ def assets_from_directory(assets_directory: Union[str, PathLike], recursive: boo
if isinstance(assets_directory, PathLike):
assets_directory = str(assets_directory)
found_assets = []
- for entry in scan_directory(assets_directory, recursive):
+ for entry in scan_directory(assets_directory, recursive, IGNORE_DIRECTORIES if not no_ignore else None):
relative_path = os.path.relpath(os.path.dirname(entry.path), assets_directory)
found_assets.append(Asset(absolute_path=os.path.abspath(entry.path),
relative_path=None if relative_path == "." else relative_path,
@@ -161,14 +164,14 @@ def copy(self) -> 'AssetCollection':
def add_directory(self, assets_directory: Union[str, PathLike], recursive: bool = True, flatten: bool = False,
filters: 'TAssetFilterList' = None, filters_mode: FilterMode = FilterMode.OR, # noqa: F821
- relative_path: str = None):
+ relative_path: str = None, no_ignore: bool = False):
"""
Retrieve assets from the specified directory and add them to the collection.
See :meth:`~AssetCollection.assets_from_directory` for arguments.
"""
if isinstance(assets_directory, PathLike):
assets_directory = str(assets_directory)
- assets = AssetCollection.assets_from_directory(assets_directory, recursive, flatten, filters, filters_mode, relative_path)
+ assets = AssetCollection.assets_from_directory(assets_directory, recursive, flatten, filters, filters_mode, relative_path, no_ignore)
for asset in assets:
self.add_asset(asset)
diff --git a/idmtools_core/idmtools/config/idm_config_parser.py b/idmtools_core/idmtools/config/idm_config_parser.py
index 123714375..b10265189 100644
--- a/idmtools_core/idmtools/config/idm_config_parser.py
+++ b/idmtools_core/idmtools/config/idm_config_parser.py
@@ -54,7 +54,10 @@ def __new__(cls, dir_path: str = '.', file_name: str = default_config) -> 'IdmCo
cls._instance._load_config_file(dir_path, file_name)
# Only error when a user overrides the filename for idmtools.ini
if (dir_path != "." or file_name != default_config) and not cls.found_ini():
- raise ValueError(f"The configuration file {os.path.join(dir_path, file_name)} was not found!")
+ raise FileNotFoundError(f"The configuration file {os.path.join(dir_path, file_name)} was not found!")
+ # Call our startup plugins
+ from idmtools.registry.functions import FunctionPluginManager
+ FunctionPluginManager.instance().hook.idmtools_on_start()
return cls._instance
@classmethod
@@ -156,10 +159,12 @@ def _load_config_file(cls, dir_path: str = None, file_name: str = default_config
# init logging here as this is our most likely entry-point into an idmtools "application"
from idmtools.core.logging import VERBOSE
+ # Look for the config file. First check environment vars
if "IDMTOOLS_CONFIG_FILE" in os.environ:
if not os.path.exists(os.environ["IDMTOOLS_CONFIG_FILE"]):
raise FileNotFoundError(f'Cannot find idmtools config at {os.environ["IDMTOOLS_CONFIG_FILE"]}')
ini_file = os.environ["IDMTOOLS_CONFIG_FILE"]
+ # Try find file
else:
ini_file = cls._find_config(dir_path, file_name)
# Fallback to user home directories
@@ -167,6 +172,8 @@ def _load_config_file(cls, dir_path: str = None, file_name: str = default_config
global_config = cls.get_global_configuration_name()
if os.path.exists(global_config):
ini_file = global_config
+
+ # If we didn't find a file, warn the user and init logging
if ini_file is None:
if os.getenv("IDMTOOLS_NO_CONFIG_WARNING", "F").lower() not in TRUTHY_VALUES:
# We use print since logger isn't configured unless there is an override(cli)
@@ -174,6 +181,7 @@ def _load_config_file(cls, dir_path: str = None, file_name: str = default_config
cls._init_logging()
return
+ # Load file
cls._config_path = ini_file
cls._config = ConfigParser()
cls._config.read(ini_file)
@@ -207,7 +215,7 @@ def _init_logging(cls):
# Do import locally to prevent load error
from idmtools import __version__
- if "+nightly" in __version__ and os.getenv('IDMTOOLS_HIDE_DEV_WARNING', None) is None:
+ if "+nightly" in __version__ and os.getenv('IDMTOOLS_HIDE_DEV_WARNING', None) is None and os.getenv("_IDMTOOLS_COMPLETE", None) is None:
user_logger.warning(f"You are using a development version of idmtools, version {__version__}!")
@classmethod
@@ -312,10 +320,6 @@ def ensure_init(cls, dir_path: str = '.', file_name: str = default_config, force
if cls._instance is None:
cls(dir_path, file_name)
- # Call our startup plugins
- from idmtools.registry.functions import FunctionPluginManager
- FunctionPluginManager.instance().hook.idmtools_on_start()
-
@classmethod
@initialization()
def get_config_path(cls) -> str:
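With the hunk above, a missing user-specified configuration file now raises FileNotFoundError instead of ValueError, and the startup plugins fire from __new__ rather than ensure_init. A sketch of the caller-facing change (file name is hypothetical):

from idmtools.config import IdmConfigParser

try:
    IdmConfigParser(file_name="idmtools_custom.ini")
except FileNotFoundError as err:
    # previously this surfaced as ValueError
    print(err)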
diff --git a/idmtools_core/idmtools/entities/experiment.py b/idmtools_core/idmtools/entities/experiment.py
index 12b783b2c..c813117b4 100644
--- a/idmtools_core/idmtools/entities/experiment.py
+++ b/idmtools_core/idmtools/entities/experiment.py
@@ -17,7 +17,8 @@
from idmtools.entities.itask import ITask
from idmtools.entities.platform_requirements import PlatformRequirements
from idmtools.entities.templated_simulation import TemplatedSimulations
-from idmtools.registry.experiment_specification import ExperimentPluginSpecification, get_model_impl, get_model_type_impl
+from idmtools.registry.experiment_specification import ExperimentPluginSpecification, get_model_impl, \
+ get_model_type_impl
from idmtools.registry.plugin_specification import get_description_impl
from idmtools.utils.collections import ExperimentParentIterator
from idmtools.utils.entities import get_default_tags
@@ -93,9 +94,11 @@ def status(self):
status = None # this will trigger experiment creation on a platform
elif any([s == EntityStatus.RUNNING for s in sim_statuses]):
status = EntityStatus.RUNNING
- elif any([s == EntityStatus.CREATED for s in sim_statuses]) and any([s in [EntityStatus.FAILED, EntityStatus.SUCCEEDED] for s in sim_statuses]):
+ elif any([s == EntityStatus.CREATED for s in sim_statuses]) and any(
+ [s in [EntityStatus.FAILED, EntityStatus.SUCCEEDED] for s in sim_statuses]):
status = EntityStatus.RUNNING
- elif any([s is None for s in sim_statuses]) and any([s in [EntityStatus.FAILED, EntityStatus.SUCCEEDED] for s in sim_statuses]):
+ elif any([s is None for s in sim_statuses]) and any(
+ [s in [EntityStatus.FAILED, EntityStatus.SUCCEEDED] for s in sim_statuses]):
status = EntityStatus.CREATED
elif any([s == EntityStatus.FAILED for s in sim_statuses]):
status = EntityStatus.FAILED
@@ -393,7 +396,8 @@ def list_static_assets(self, children: bool = False, platform: 'IPlatform' = Non
p = super()._check_for_platform_from_context(platform)
return p._experiments.list_assets(self, children, **kwargs)
- def run(self, wait_until_done: bool = False, platform: 'IPlatform' = None, regather_common_assets: bool = None, wait_on_done_progress: bool = True, wait_on_done: bool = False,
+ def run(self, wait_until_done: bool = False, platform: 'IPlatform' = None, regather_common_assets: bool = None,
+ wait_on_done_progress: bool = True, wait_on_done: bool = False,
**run_opts) -> NoReturn:
"""
Runs an experiment on a platform
@@ -417,8 +421,10 @@ def run(self, wait_until_done: bool = False, platform: 'IPlatform' = None, regat
message = "To modify an experiment's asset collection, you must make a copy of it first. For example\nexperiment.assets = experiment.assets.copy()"
user_logger.error(message) # Show it bold red to user
raise ValueError(message)
- if not self.assets.is_editable() and isinstance(self.simulations.items, TemplatedSimulations) and not regather_common_assets:
- user_logger.warning("You are modifying and existing experiment by using a template without gathering common assets. Ensure your Template configuration is the same as existing experiments or enable gathering of new common assets through regather_common_assets.")
+ if not self.assets.is_editable() and isinstance(self.simulations.items,
+ TemplatedSimulations) and not regather_common_assets:
+ user_logger.warning(
+ "You are modifying and existing experiment by using a template without gathering common assets. Ensure your Template configuration is the same as existing experiments or enable gathering of new common assets through regather_common_assets.")
run_opts['regather_common_assets'] = regather_common_assets
p.run_items(self, **run_opts)
if wait_until_done or wait_on_done:
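When re-running an existing experiment built from TemplatedSimulations, the warning above can be avoided by stating the intent explicitly; a minimal sketch:

# opt in to rebuilding the common asset collection for the re-run
experiment.run(wait_until_done=True, regather_common_assets=True)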
@@ -436,7 +442,8 @@ def to_dict(self):
# Define this here for better completion in IDEs for end users
@classmethod
- def from_id(cls, item_id: Union[str, uuid.UUID], platform: 'IPlatform' = None, copy_assets: bool = False, **kwargs) -> 'Experiment':
+ def from_id(cls, item_id: Union[str, uuid.UUID], platform: 'IPlatform' = None, copy_assets: bool = False,
+ **kwargs) -> 'Experiment':
"""
Helper function to provide better intellisense to end users
diff --git a/idmtools_core/idmtools/entities/iplatform.py b/idmtools_core/idmtools/entities/iplatform.py
index cd31d4904..c110f0e2a 100644
--- a/idmtools_core/idmtools/entities/iplatform.py
+++ b/idmtools_core/idmtools/entities/iplatform.py
@@ -415,12 +415,13 @@ def get_cache_key(self, force, item_id, item_type, kwargs, raw, prefix='p'):
self.cache.delete(cache_key)
return cache_key
- def create_items(self, items: Union[List[IEntity], IEntity]) -> List[IEntity]:
+ def create_items(self, items: Union[List[IEntity], IEntity], **kwargs) -> List[IEntity]:
"""
Create items (simulations, experiments, or suites) on the platform. The function will batch the items based on
type and call self._create_items_of_type for creation
Args:
items: The list of items to create.
+ kwargs: Extra arguments
Returns:
List of item IDs created.
"""
@@ -431,7 +432,7 @@ def create_items(self, items: Union[List[IEntity], IEntity]) -> List[IEntity]:
result = []
for key, group in groupby(items, lambda x: x.item_type):
- result.extend(self._create_items_of_type(group, key))
+ result.extend(self._create_items_of_type(group, key, **kwargs))
return result
def _create_items_of_type(self, items: Iterator[IEntity], item_type: ItemType, **kwargs):
@@ -712,8 +713,9 @@ def __wait_until_done_progress_callback(item: Union[Experiment, IWorkflowItem],
if child_attribute is None:
if isinstance(item, IWorkflowItem):
if item.status in done_states:
- progress_bar.update(1)
- progress_bar.close()
+ if progress_bar:
+ progress_bar.update(1)
+ progress_bar.close()
return True
return False
else:
diff --git a/idmtools_core/idmtools/entities/iplatform_ops/iplatform_asset_collection_operations.py b/idmtools_core/idmtools/entities/iplatform_ops/iplatform_asset_collection_operations.py
index da58cfc34..26fa30827 100644
--- a/idmtools_core/idmtools/entities/iplatform_ops/iplatform_asset_collection_operations.py
+++ b/idmtools_core/idmtools/entities/iplatform_ops/iplatform_asset_collection_operations.py
@@ -1,5 +1,6 @@
from abc import ABC, abstractmethod
from dataclasses import dataclass
+from logging import DEBUG, getLogger
from typing import Any, List, Type, NoReturn, TYPE_CHECKING
from uuid import UUID
from idmtools.assets import AssetCollection
@@ -9,6 +10,7 @@
if TYPE_CHECKING: # pragma: no cover
from idmtools.entities.iplatform import IPlatform
+logger = getLogger(__name__)
@dataclass
@@ -27,7 +29,11 @@ def pre_create(self, asset_collection: AssetCollection, **kwargs) -> NoReturn:
Returns:
NoReturn
"""
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Calling idmtools_platform_pre_create_item")
FunctionPluginManager.instance().hook.idmtools_platform_pre_create_item(item=asset_collection, kwargs=kwargs)
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Calling pre_creation")
asset_collection.pre_creation(self.platform)
def post_create(self, asset_collection: AssetCollection, **kwargs) -> NoReturn:
@@ -41,6 +47,8 @@ def post_create(self, asset_collection: AssetCollection, **kwargs) -> NoReturn:
Returns:
NoReturn
"""
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Calling post_creation")
asset_collection.post_creation(self.platform)
def create(self, asset_collection: AssetCollection, do_pre: bool = True, do_post: bool = True, **kwargs) -> Any:
@@ -60,9 +68,15 @@ def create(self, asset_collection: AssetCollection, do_pre: bool = True, do_post
if asset_collection.status is not None:
return asset_collection._platform_object
if do_pre:
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Calling pre_create")
self.pre_create(asset_collection, **kwargs)
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Calling platform_create")
ret = self.platform_create(asset_collection, **kwargs)
if do_post:
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Calling post_create")
self.post_create(asset_collection, **kwargs)
return ret
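The repeated guard in these hooks is the standard logging idiom: isEnabledFor(DEBUG) is checked first so the message string is only built when debug output is active. A minimal sketch of the pattern:

from logging import DEBUG, getLogger

logger = getLogger(__name__)

def traced_step(item):
    if logger.isEnabledFor(DEBUG):
        # the f-string is only evaluated when DEBUG is enabled
        logger.debug(f"Processing {item!r}")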
diff --git a/idmtools_core/idmtools/entities/iplatform_ops/iplatform_experiment_operations.py b/idmtools_core/idmtools/entities/iplatform_ops/iplatform_experiment_operations.py
index c68809a90..c15548eb3 100644
--- a/idmtools_core/idmtools/entities/iplatform_ops/iplatform_experiment_operations.py
+++ b/idmtools_core/idmtools/entities/iplatform_ops/iplatform_experiment_operations.py
@@ -2,7 +2,7 @@
from concurrent.futures import as_completed
from concurrent.futures.thread import ThreadPoolExecutor
from dataclasses import dataclass
-from logging import getLogger
+from logging import getLogger, DEBUG
from types import GeneratorType
from typing import Type, Any, NoReturn, Tuple, List, Dict, Iterator, Union, TYPE_CHECKING
from uuid import UUID
@@ -47,7 +47,11 @@ def pre_create(self, experiment: Experiment, **kwargs) -> NoReturn:
Returns:
NoReturn
"""
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Calling idmtools_platform_pre_create_item hooks")
FunctionPluginManager.instance().hook.idmtools_platform_pre_create_item(item=experiment, kwargs=kwargs)
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Calling experiment pre_creation")
experiment.pre_creation(self.platform)
def post_create(self, experiment: Experiment, **kwargs) -> NoReturn:
@@ -61,6 +65,8 @@ def post_create(self, experiment: Experiment, **kwargs) -> NoReturn:
Returns:
NoReturn
"""
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Calling experiment post_creation")
experiment.post_creation(self.platform)
def create(self, experiment: Experiment, do_pre: bool = True, do_post: bool = True, **kwargs) -> \
@@ -79,14 +85,26 @@ def create(self, experiment: Experiment, do_pre: bool = True, do_post: bool = Tr
Created platform item and the UUID of said item
"""
if experiment.status is not None:
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Calling experiment platform_modify_experiment")
experiment = self.platform_modify_experiment(experiment, **kwargs)
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Finished platform_modify_experiment")
return experiment
if do_pre:
self.pre_create(experiment, **kwargs)
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Finished pre_create")
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Calling platform_create")
experiment._platform_object = self.platform_create(experiment, **kwargs)
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Finished platform_create")
experiment.platform = self.platform
if do_post:
self.post_create(experiment, **kwargs)
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Finished post_create")
return experiment
@abstractmethod
@@ -173,21 +191,34 @@ def pre_run_item(self, experiment: Experiment, **kwargs):
Returns:
"""
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Calling pre_run")
experiment.pre_run(self.platform)
# ensure the item is created before running
if experiment.status is None:
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Calling create")
self.create(experiment, **kwargs)
else:
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Calling platform_modify_experiment")
experiment = self.platform_modify_experiment(experiment, **kwargs)
# check sims
- logger.debug("Ensuring simulations exist")
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Ensuring simulations exist")
if isinstance(experiment.simulations, (GeneratorType, Iterator)):
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Calling _create_items_of_type for sims")
experiment.simulations = self.platform._create_items_of_type(experiment.simulations, ItemType.SIMULATION, **kwargs)
elif len(experiment.simulations) == 0:
raise ValueError("You cannot have an experiment with no simulations")
else:
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Calling _create_items_of_type for sims")
experiment.simulations = self.platform._create_items_of_type(experiment.simulations, ItemType.SIMULATION, **kwargs)
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Finished checking simulations")
def post_run_item(self, experiment: Experiment, **kwargs):
"""
@@ -212,9 +243,15 @@ def run_item(self, experiment: Experiment, **kwargs):
Returns:
"""
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Calling pre_run_item")
self.pre_run_item(experiment, **kwargs)
if experiment.status not in [EntityStatus.FAILED, EntityStatus.SUCCEEDED]:
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Calling platform_run_item")
self.platform_run_item(experiment, **kwargs)
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Calling post_run_item")
self.post_run_item(experiment, **kwargs)
@abstractmethod
diff --git a/idmtools_core/idmtools/entities/iplatform_ops/iplatform_simulation_operations.py b/idmtools_core/idmtools/entities/iplatform_ops/iplatform_simulation_operations.py
index 1f8f855d7..1eed75a9c 100644
--- a/idmtools_core/idmtools/entities/iplatform_ops/iplatform_simulation_operations.py
+++ b/idmtools_core/idmtools/entities/iplatform_ops/iplatform_simulation_operations.py
@@ -1,8 +1,8 @@
from abc import ABC, abstractmethod
from dataclasses import dataclass
+from logging import getLogger, DEBUG
from typing import Type, Any, List, Dict, NoReturn, Optional
from uuid import UUID
-
from idmtools.assets import Asset
from idmtools.core.cache_enabled import CacheEnabled
from idmtools.entities.experiment import Experiment
@@ -10,6 +10,8 @@
from idmtools.entities.simulation import Simulation
from idmtools.registry.functions import FunctionPluginManager
+logger = getLogger(__name__)
+
@dataclass
class IPlatformSimulationOperations(CacheEnabled, ABC):
@@ -41,7 +43,11 @@ def pre_create(self, simulation: Simulation, **kwargs) -> NoReturn:
Returns:
NoReturn
"""
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Calling idmtools_platform_pre_create_item")
FunctionPluginManager.instance().hook.idmtools_platform_pre_create_item(item=simulation, kwargs=kwargs)
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Calling pre_creation")
simulation.pre_creation(self.platform)
def post_create(self, simulation: Simulation, **kwargs) -> NoReturn:
@@ -55,6 +61,8 @@ def post_create(self, simulation: Simulation, **kwargs) -> NoReturn:
Returns:
NoReturn
"""
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Calling post_creation")
simulation.post_creation(self.platform)
def create(self, simulation: Simulation, do_pre: bool = True, do_post: bool = True, **kwargs) -> Any:
@@ -75,9 +83,17 @@ def create(self, simulation: Simulation, do_pre: bool = True, do_post: bool = Tr
return simulation
if do_pre:
self.pre_create(simulation, **kwargs)
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Finished pre_create")
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Calling platform_create")
ret = self.platform_create(simulation, **kwargs)
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Finished platform_create")
if do_post:
self.post_create(simulation, **kwargs)
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Finished post_create")
return ret
@abstractmethod
diff --git a/idmtools_core/idmtools/entities/iplatform_ops/iplatform_suite_operations.py b/idmtools_core/idmtools/entities/iplatform_ops/iplatform_suite_operations.py
index e0fccfc7b..f13a45db4 100644
--- a/idmtools_core/idmtools/entities/iplatform_ops/iplatform_suite_operations.py
+++ b/idmtools_core/idmtools/entities/iplatform_ops/iplatform_suite_operations.py
@@ -1,5 +1,6 @@
from abc import ABC, abstractmethod
from dataclasses import dataclass
+from logging import getLogger, DEBUG
from typing import Type, Any, List, Tuple, Dict, NoReturn, TYPE_CHECKING
from uuid import UUID
from idmtools.core.enums import EntityStatus, ItemType
@@ -10,6 +11,8 @@
if TYPE_CHECKING: # pragma: no cover
from idmtools.entities.iplatform import IPlatform
+logger = getLogger(__name__)
+
@dataclass
class IPlatformSuiteOperations(ABC):
@@ -57,7 +60,11 @@ def pre_create(self, suite: Suite, **kwargs) -> NoReturn:
Returns:
NoReturn
"""
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Calling idmtools_platform_pre_create_item")
FunctionPluginManager.instance().hook.idmtools_platform_pre_create_item(item=suite, kwargs=kwargs)
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Calling pre_creation")
suite.pre_creation(self.platform)
def post_create(self, suite: Suite, **kwargs) -> NoReturn:
@@ -95,9 +102,17 @@ def create(self, suite: Suite, do_pre: bool = True, do_post: bool = True, **kwar
return suite._platform_object, suite.uid
if do_pre:
self.pre_create(suite, **kwargs)
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Finished pre_create")
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Calling platform_create")
ret = self.platform_create(suite, **kwargs)
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Finished platform_create")
if do_post:
self.post_create(suite, **kwargs)
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Finished post_create")
return ret
@abstractmethod
@@ -135,7 +150,11 @@ def pre_run_item(self, suite: Suite, **kwargs):
if exp.status is None:
exps_to_commission.append(exp)
if exps_to_commission:
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Calling run_items")
self.platform.run_items(exps_to_commission, **kwargs)
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Finished run_items")
def post_run_item(self, suite: Suite, **kwargs):
"""
@@ -159,8 +178,14 @@ def run_item(self, suite: Suite, **kwargs):
Returns:
"""
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Calling pre_run_item")
self.pre_run_item(suite)
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Calling platform_run_item")
self.platform_run_item(suite)
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Calling post_run_item")
self.post_run_item(suite)
def platform_run_item(self, suite: Suite, **kwargs):
diff --git a/idmtools_core/idmtools/entities/iplatform_ops/iplatform_workflowitem_operations.py b/idmtools_core/idmtools/entities/iplatform_ops/iplatform_workflowitem_operations.py
index addb95038..9b8ed8662 100644
--- a/idmtools_core/idmtools/entities/iplatform_ops/iplatform_workflowitem_operations.py
+++ b/idmtools_core/idmtools/entities/iplatform_ops/iplatform_workflowitem_operations.py
@@ -1,9 +1,10 @@
from abc import ABC, abstractmethod
from dataclasses import dataclass
+from logging import DEBUG
from typing import Type, Any, List, Tuple, Dict, NoReturn, TYPE_CHECKING
from uuid import UUID
from idmtools.assets import Asset
-from idmtools.core import CacheEnabled
+from idmtools.core import CacheEnabled, getLogger
from idmtools.entities.iplatform_ops.utils import batch_create_items
from idmtools.entities.iworkflow_item import IWorkflowItem
from idmtools.registry.functions import FunctionPluginManager
@@ -11,6 +12,8 @@
if TYPE_CHECKING: # pragma: no cover
from idmtools.entities.iplatform import IPlatform
+logger = getLogger(__name__)
+
@dataclass
class IPlatformWorkflowItemOperations(CacheEnabled, ABC):
@@ -57,7 +60,11 @@ def pre_create(self, workflow_item: IWorkflowItem, **kwargs) -> NoReturn:
Returns:
NoReturn
"""
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Calling idmtools_platform_pre_create_item")
FunctionPluginManager.instance().hook.idmtools_platform_pre_create_item(item=workflow_item, kwargs=kwargs)
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Calling pre_creation")
workflow_item.pre_creation(self.platform)
def post_create(self, workflow_item: IWorkflowItem, **kwargs) -> NoReturn:
@@ -71,6 +78,8 @@ def post_create(self, workflow_item: IWorkflowItem, **kwargs) -> NoReturn:
Returns:
NoReturn
"""
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Calling post_creation")
workflow_item.post_creation(self.platform)
def create(self, workflow_item: IWorkflowItem, do_pre: bool = True, do_post: bool = True, **kwargs) -> Any:
@@ -91,10 +100,16 @@ def create(self, workflow_item: IWorkflowItem, do_pre: bool = True, do_post: boo
if workflow_item.status is not None:
return workflow_item._platform_object, workflow_item.uid
if do_pre:
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Calling pre_create")
self.pre_create(workflow_item, **kwargs)
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Calling platform_create")
ret = self.platform_create(workflow_item, **kwargs)
workflow_item.platform = self.platform
if do_post:
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Calling post_create")
self.post_create(workflow_item, **kwargs)
return ret
@@ -126,6 +141,8 @@ def pre_run_item(self, workflow_item: IWorkflowItem, **kwargs):
# ensure the item is created before running
# TODO what status are valid here? Create only?
if workflow_item.status is None:
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Calling create")
self.create(workflow_item)
def post_run_item(self, workflow_item: IWorkflowItem, **kwargs):
@@ -150,8 +167,14 @@ def run_item(self, workflow_item: IWorkflowItem, **kwargs):
Returns:
"""
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Calling pre_run_item")
self.pre_run_item(workflow_item, **kwargs)
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Calling platform_run_item")
self.platform_run_item(workflow_item, **kwargs)
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Calling post_run_item")
self.post_run_item(workflow_item, **kwargs)
@abstractmethod
diff --git a/idmtools_core/idmtools/entities/iplatform_ops/utils.py b/idmtools_core/idmtools/entities/iplatform_ops/utils.py
index fd5d7128d..284649083 100644
--- a/idmtools_core/idmtools/entities/iplatform_ops/utils.py
+++ b/idmtools_core/idmtools/entities/iplatform_ops/utils.py
@@ -32,7 +32,7 @@ def batch_items(items: Union[Iterable, Generator], batch_size=16):
raise StopIteration
-def item_batch_worker_thread(create_func: Callable, items: Union[List]) -> List:
+def item_batch_worker_thread(create_func: Callable, items: Union[List], **kwargs) -> List:
"""
Default batch worker thread function. It just calls create on each item
@@ -48,7 +48,7 @@ def item_batch_worker_thread(create_func: Callable, items: Union[List]) -> List:
ret = []
for item in items:
- ret.append(create_func(item))
+ ret.append(create_func(item, **kwargs))
return ret
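The effect of threading **kwargs through these helpers is that options passed to create_items reach every per-item create call; a simplified sketch of the flow (names abbreviated, not the full batching logic):

def item_batch_worker_thread(create_func, items, **kwargs):
    # each item is created with the caller's extra options
    return [create_func(item, **kwargs) for item in items]

# e.g. platform.create_items(simulations, scheduling=True) now forwards
# scheduling=True down to ops.create(simulation, scheduling=True)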
diff --git a/idmtools_core/idmtools/entities/itask.py b/idmtools_core/idmtools/entities/itask.py
index 172b5c91d..ecc407cb3 100644
--- a/idmtools_core/idmtools/entities/itask.py
+++ b/idmtools_core/idmtools/entities/itask.py
@@ -231,7 +231,7 @@ def to_dict(self) -> Dict:
Returns: dict
"""
- from idmtools_platform_comps.comps_platform import COMPSPlatform
+
from idmtools.core.context import get_current_platform
result = dict()
@@ -239,11 +239,15 @@ def to_dict(self) -> Dict:
platform = get_current_platform()
for f in fields(self):
if not f.name.startswith("_") and f.name not in ['parent']:
- if isinstance(platform, COMPSPlatform):
- if f.name in metadata_fields:
- result[f.name] = getattr(self, f.name)
+ try:
+ from idmtools_platform_comps.comps_platform import COMPSPlatform
+ if isinstance(platform, COMPSPlatform):
+ if f.name in metadata_fields:
+ result[f.name] = getattr(self, f.name)
+ else:
+ result[f.name] = f.default
else:
- result[f.name] = f.default
- else:
+ result[f.name] = getattr(self, f.name)
+ except ImportError:
result[f.name] = getattr(self, f.name)
return result
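The rewritten to_dict turns a hard dependency on idmtools_platform_comps into an optional one: the COMPS-specific branch only runs when the package imports, and ImportError falls back to plain attribute access. A stripped-down sketch of the same pattern (helper name is hypothetical):

def serialize_field(task, f, platform, metadata_fields):
    try:
        from idmtools_platform_comps.comps_platform import COMPSPlatform
        if isinstance(platform, COMPSPlatform) and f.name not in metadata_fields:
            # on COMPS, non-metadata fields collapse to their defaults
            return f.default
    except ImportError:
        pass  # comps package not installed; use the generic behaviour
    return getattr(task, f.name)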
diff --git a/idmtools_core/idmtools/entities/simulation.py b/idmtools_core/idmtools/entities/simulation.py
index 2f0b5f76f..75b5cef81 100644
--- a/idmtools_core/idmtools/entities/simulation.py
+++ b/idmtools_core/idmtools/entities/simulation.py
@@ -15,7 +15,6 @@
from idmtools.entities.iplatform import IPlatform
from idmtools.entities.experiment import Experiment
-
logger = getLogger(__name__)
user_logger = getLogger('user')
diff --git a/idmtools_core/idmtools/utils/file.py b/idmtools_core/idmtools/utils/file.py
index 70a5cf6a4..d9dcb9bcc 100644
--- a/idmtools_core/idmtools/utils/file.py
+++ b/idmtools_core/idmtools/utils/file.py
@@ -1,15 +1,16 @@
import os
from os import DirEntry
-from typing import Iterable, Generator
+from typing import Iterable, Generator, List
-def scan_directory(basedir: str, recursive: bool = True) -> Iterable[DirEntry]:
+def scan_directory(basedir: str, recursive: bool = True, ignore_directories: List[str] = None) -> Iterable[DirEntry]:
"""
Scan a directory recursively or not.
Args:
basedir: The root directory to start from.
recursive: True to search the subfolders recursively; False to stay in the root directory.
+ ignore_directories: List of directory names to skip while scanning
Returns:
An iterator yielding all the files found.
@@ -18,7 +19,8 @@ def scan_directory(basedir: str, recursive: bool = True) -> Iterable[DirEntry]:
if entry.is_file():
yield entry
elif recursive:
- yield from scan_directory(entry.path)
+ if ignore_directories is None or entry.name not in ignore_directories:
+ yield from scan_directory(entry.path)
def file_contents_to_generator(filename, chunk_size=128) -> Generator[bytearray, None, None]:
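A usage sketch of the extended scan_directory (directory names are illustrative):

from idmtools.utils.file import scan_directory

# walk the tree but skip the contents of .git and .svn
for entry in scan_directory("inputs", recursive=True, ignore_directories=[".git", ".svn"]):
    print(entry.path)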
diff --git a/idmtools_core/requirements.txt b/idmtools_core/requirements.txt
index 6592d2e09..9264a4b66 100644
--- a/idmtools_core/requirements.txt
+++ b/idmtools_core/requirements.txt
@@ -1,13 +1,13 @@
backoff>=1.10.0,<1.11
-coloredlogs~=14.0
-diskcache==5.1.0
-humanfriendly~=8.2
-more-itertools~=8.6.0
+coloredlogs~=15.0
+diskcache~=5.1.0
+more-itertools~=8.7.0
numpy!=1.19.4
pandas>=1.1.4,<1.2
pipreqs>=0.4.10,<0.5
pluggy~=0.13.1
-PyYaml>=5.3.0,<5.4
-tabulate~=0.8.7
+PyYaml>=5.3.0,<5.5
+tabulate>=0.8.9,<0.9
tqdm>=4.52.0,<5
packaging>=20.4,<21.0
+pygit2>=1.4.0,<1.6.0
diff --git a/idmtools_core/setup.py b/idmtools_core/setup.py
index d03efe3b7..ee2e58842 100644
--- a/idmtools_core/setup.py
+++ b/idmtools_core/setup.py
@@ -15,7 +15,7 @@
with open(f'{filename}.txt') as requirements_file:
extra_require_files[file_prefix.strip("_") if file_prefix else filename] = requirements_file.read().split("\n")
-version = '1.6.2'
+version = '1.6.3'
extras = {
'test': extra_require_files['build'] + extra_require_files['dev'],
diff --git a/idmtools_core/tests/test_assets.py b/idmtools_core/tests/test_assets.py
index f1da3b604..edc96e695 100644
--- a/idmtools_core/tests/test_assets.py
+++ b/idmtools_core/tests/test_assets.py
@@ -1,3 +1,5 @@
+from pathlib import PurePath
+
import allure
import json
import os
@@ -10,6 +12,7 @@
from idmtools.core import FilterMode
from idmtools.utils.filters.asset_filters import asset_in_directory, file_name_is
from idmtools_test import COMMON_INPUT_PATH
+from idmtools_test.utils.decorators import run_in_temp_dir
@pytest.mark.assets
@@ -325,6 +328,27 @@ def test_large_asset_merge_speed(self):
assets2.add_asset(Asset(content=f"{i}", filename=f"{i}"))
assets1.add_assets(assets2)
+ @run_in_temp_dir
+ def test_ignore_git(self):
+ # make test data
+ bd = PurePath("test_directory")
+ gd = bd.joinpath(".git")
+ os.makedirs(gd, exist_ok=True)
+
+ with open(bd.joinpath("test1.txt"), "w") as fout:
+ fout.write("1")
+
+ with open(gd.joinpath("test2.txt"), "w") as fout:
+ fout.write("2")
+
+ ac = AssetCollection()
+ ac.add_directory(bd)
+
+ self.assertEqual(len(ac), 1)
+
+ ac = AssetCollection()
+ ac.add_directory(bd, no_ignore=True)
+ self.assertEqual(len(ac), 2)
if __name__ == '__main__':
unittest.main()
diff --git a/idmtools_core/tests/test_configuration.py b/idmtools_core/tests/test_configuration.py
index 6128cbe46..9a6b44ec5 100644
--- a/idmtools_core/tests/test_configuration.py
+++ b/idmtools_core/tests/test_configuration.py
@@ -104,7 +104,7 @@ def test_idmtools_ini_option(self, login_mock):
@skip_if_global_configuration_is_enabled
def test_non_standard_name_fails_when_not_found(self):
- with self.assertRaises(ValueError) as err:
+ with self.assertRaises(FileNotFoundError) as err:
IdmConfigParser(file_name="idmtools_does_not_exist.ini")
self.assertIn("idmtools_does_not_exist.ini was not found!", err.exception.args[0])
diff --git a/idmtools_models/.bumpversion.cfg b/idmtools_models/.bumpversion.cfg
index 05b254620..b95a3e72c 100644
--- a/idmtools_models/.bumpversion.cfg
+++ b/idmtools_models/.bumpversion.cfg
@@ -1,5 +1,5 @@
[bumpversion]
-current_version = 1.6.2
+current_version = 1.6.3
commit = False
tag = False
parse = (?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)((?P<release>[\+a-z]+)\.(?P<build>\d+))?
diff --git a/idmtools_models/idmtools_models/__init__.py b/idmtools_models/idmtools_models/__init__.py
index 21feb4ae3..03bd259f8 100644
--- a/idmtools_models/idmtools_models/__init__.py
+++ b/idmtools_models/idmtools_models/__init__.py
@@ -1 +1 @@
-__version__ = "1.6.2.0"
+__version__ = "1.6.3.0"
diff --git a/idmtools_models/requirements.txt b/idmtools_models/requirements.txt
index 75ffb2f41..949eee77b 100644
--- a/idmtools_models/requirements.txt
+++ b/idmtools_models/requirements.txt
@@ -1,2 +1,2 @@
-idmtools~=1.6.2
-jinja2~=2.11.2
\ No newline at end of file
+idmtools~=1.6.3
+jinja2~=2.11.3
\ No newline at end of file
diff --git a/idmtools_models/setup.py b/idmtools_models/setup.py
index 8bd393590..3f9e6620d 100644
--- a/idmtools_models/setup.py
+++ b/idmtools_models/setup.py
@@ -63,5 +63,5 @@
test_suite='tests',
extras_require=extras,
url='https://github.com/InstituteforDiseaseModeling/idmtools',
- version='1.6.2'
+ version='1.6.3'
)
diff --git a/idmtools_platform_comps/.bumpversion.cfg b/idmtools_platform_comps/.bumpversion.cfg
index 564d1e85f..7c15cc078 100644
--- a/idmtools_platform_comps/.bumpversion.cfg
+++ b/idmtools_platform_comps/.bumpversion.cfg
@@ -1,5 +1,5 @@
[bumpversion]
-current_version = 1.6.2
+current_version = 1.6.3
commit = False
tag = False
parse = (?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)((?P<release>[\+a-z]+)\.(?P<build>\d+))?
diff --git a/idmtools_platform_comps/VERSION b/idmtools_platform_comps/VERSION
index 308b6faa7..f5d2a5858 100644
--- a/idmtools_platform_comps/VERSION
+++ b/idmtools_platform_comps/VERSION
@@ -1 +1 @@
-1.6.2
\ No newline at end of file
+1.6.3
\ No newline at end of file
diff --git a/idmtools_platform_comps/idmtools_platform_comps/__init__.py b/idmtools_platform_comps/idmtools_platform_comps/__init__.py
index ff947837d..3a27df73a 100644
--- a/idmtools_platform_comps/idmtools_platform_comps/__init__.py
+++ b/idmtools_platform_comps/idmtools_platform_comps/__init__.py
@@ -4,4 +4,4 @@
from idmtools_platform_comps.comps_cli import CompsCLI
except ImportError:
pass
-__version__ = "1.6.2.0"
+__version__ = "1.6.3.0"
diff --git a/idmtools_platform_comps/idmtools_platform_comps/cli/comps.py b/idmtools_platform_comps/idmtools_platform_comps/cli/comps.py
index 529039ce8..fadcba0b8 100644
--- a/idmtools_platform_comps/idmtools_platform_comps/cli/comps.py
+++ b/idmtools_platform_comps/idmtools_platform_comps/cli/comps.py
@@ -2,6 +2,7 @@
import json as json_parser
import os
import sys
+from typing import Optional, List
import tabulate
from getpass import getpass
from logging import getLogger
@@ -80,12 +81,13 @@ def login(ctx: click.Context, username, password):
if password:
user_logger.warning("Password the password via the command line is considered insecure")
else:
- password = getpass("Password")
+ password = getpass("Password:")
# make platform object to load info from alias or config but don't login
platform = Platform(ctx.obj['config_block'], _skip_login=True)
try:
- Client.login(platform.endpoint, StaticCredentialPrompt(comps_url=platform.endpoint, username=username, password=password))
+ Client.login(platform.endpoint,
+ StaticCredentialPrompt(comps_url=platform.endpoint, username=username, password=password))
user_logger.log(SUCCESS, "Login succeeded")
except PermissionError:
user_logger.error(f"Could not loging to {platform.endpoint}")
@@ -97,28 +99,57 @@ def login(ctx: click.Context, username, password):
@click.option('--experiment', default=[], multiple=True, help="Experiment ids to filter for files to download")
@click.option('--simulation', default=[], multiple=True, help="Simulation ids to filter for files to download")
@click.option('--work-item', default=[], multiple=True, help="WorkItems ids to filter for files to download")
- @click.option('--asset-collection', default=[], multiple=True, help="Asset Collection ids to filter for files to download")
- @click.option('--dry-run/--no-dry-run', default=False, help="Gather a list of files that would be downloaded instead of actually downloading")
+ @click.option('--asset-collection', default=[], multiple=True,
+ help="Asset Collection ids to filter for files to download")
+ @click.option('--dry-run/--no-dry-run', default=False,
+ help="Gather a list of files that would be downloaded instead of actually downloading")
@click.option('--wait/--no-wait', default=True, help="Wait on item to finish")
- @click.option('--include-assets/--no-include-assets', default=False, help="Scan common assets of WorkItems and Experiments when filtering")
+ @click.option('--include-assets/--no-include-assets', default=False,
+ help="Scan common assets of WorkItems and Experiments when filtering")
@click.option('--verbose/--no-verbose', default=True, help="Enable verbose output in worker")
@click.option('--json/--no-json', default=False, help="Outputs File list as JSON when used with dry run")
- @click.option('--simulation-prefix-format-str', default=None, help="Simulation Prefix Format str. Defaults to '{simulation.id}'. For no prefix, pass a empty string")
+ @click.option('--simulation-prefix-format-str', default=None,
+ help="Simulation Prefix Format str. Defaults to '{simulation.id}'. For no prefix, pass a empty string")
@click.option('--work-item-prefix-format-str', default=None, help="WorkItem Prefix Format str. Defaults to ''")
@click.option('--name', default=None, help="Name of Download Workitem. If not provided, one will be generated")
@click.option('--output-path', default=os.getcwd(), help="Output path to save zip")
+ @click.option('--delete-after-download/--no-delete-after-download', default=True,
+ help="Delete the workitem used to gather files after download")
+ @click.option('--extract-after-download/--no-extract-after-download', default=True,
+ help="Extract zip after download")
+ @click.option('--zip-name', default="output.zip", help="Name of zipfile")
@click.pass_context
def download(
- ctx: click.Context, pattern, exclude_pattern, experiment, simulation, work_item, asset_collection, dry_run, wait,
+ ctx: click.Context, pattern, exclude_pattern, experiment, simulation, work_item, asset_collection, dry_run,
+ wait,
include_assets, verbose, json, simulation_prefix_format_str, work_item_prefix_format_str, name, output_path,
+ delete_after_download,
+ extract_after_download, zip_name
):
from idmtools_platform_comps.utils.download.download import DownloadWorkItem
+
+ if json and not dry_run:
+ user_logger.error("You cannot return JSON without enabling dry-run mode")
+ sys.exit(-1)
+
+ if dry_run and delete_after_download:
+ user_logger.warning(
+ "You are using dry-run with delete after download. This will most result in an empty file list since "
+ "the item will be deleted before the output can be fetched.")
+
if json:
os.environ['IDMTOOLS_SUPPRESS_OUTPUT'] = '1'
+ os.environ['IDMTOOLS_DISABLE_PROGRESS_BAR'] = '1'
p: COMPSPlatform = Platform(ctx.obj['config_block'])
- dl_wi = DownloadWorkItem(output_path=output_path)
+ dl_wi = DownloadWorkItem(
+ output_path=output_path,
+ delete_after_download=delete_after_download,
+ extract_after_download=extract_after_download,
+ zip_name=zip_name
+ )
+
if name:
dl_wi.name = name
if pattern:
@@ -145,12 +176,12 @@ def download(
user_logger.error("You must specify at least one item to download")
dl_wi.run(wait_until_done=False, platform=p)
- if not json:
+ if not json and not delete_after_download:
user_logger.info(f"Item can be viewed at {p.get_workitem_link(dl_wi)}")
if wait:
dl_wi.wait(wait_on_done_progress=wait)
if dl_wi.succeeded:
- if dl_wi.dry_run:
+ if dl_wi.dry_run and not delete_after_download:
file = p.get_files(dl_wi, ['file_list.json'])
file = file['file_list.json'].decode('utf-8')
if json:
@@ -159,7 +190,6 @@ def download(
file = json_parser.loads(file)
user_logger.info(tabulate.tabulate([x.values() for x in file], file[0].keys()))
else:
- # Now
pass
elif dl_wi.failed:
user_logger.error("Download failed. Check logs in COMPS")
@@ -174,27 +204,36 @@ def download(
@click.option('--simulation', default=[], multiple=True, help="Simulation ids to assetize")
@click.option('--work-item', default=[], multiple=True, help="WorkItems ids to assetize")
@click.option('--asset-collection', default=[], multiple=True, help="Asset Collection ids to assetize")
- @click.option('--dry-run/--no-dry-run', default=False, help="Gather a list of files that would be assetized instead of actually assetizing")
+ @click.option('--dry-run/--no-dry-run', default=False,
+ help="Gather a list of files that would be assetized instead of actually assetizing")
@click.option('--wait/--no-wait', default=True, help="Wait on item to finish")
- @click.option('--include-assets/--no-include-assets', default=False, help="Scan common assets of WorkItems and Experiments when filtering")
+ @click.option('--include-assets/--no-include-assets', default=False,
+ help="Scan common assets of WorkItems and Experiments when filtering")
@click.option('--verbose/--no-verbose', default=True, help="Enable verbose output in worker")
@click.option('--json/--no-json', default=False, help="Outputs File list as JSON when used with dry run")
- @click.option('--simulation-prefix-format-str', default=None, help="Simulation Prefix Format str. Defaults to '{simulation.id}'. For no prefix, pass a empty string")
+ @click.option('--simulation-prefix-format-str', default=None,
+ help="Simulation Prefix Format str. Defaults to '{simulation.id}'. For no prefix, pass a empty string")
@click.option('--work-item-prefix-format-str', default=None, help="WorkItem Prefix Format str. Defaults to ''")
- @click.option('--tag', default=[], type=(str, str), multiple=True, help="Tags to add to the created asset collection as pairs.")
+ @click.option('--tag', default=[], type=(str, str), multiple=True,
+ help="Tags to add to the created asset collection as pairs.")
@click.option('--name', default=None, help="Name of AssetizeWorkitem. If not provided, one will be generated")
@click.option('--id-file/--no-id-file', default=False, help="Enable or disable writing out an id file")
- @click.option('--id-filename', default=None, help="Name of ID file to save build as. Required when id file is enabled")
+ @click.option('--id-filename', default=None,
+ help="Name of ID file to save build as. Required when id file is enabled")
@click.pass_context
def assetize_outputs(
- ctx: click.Context, pattern, exclude_pattern, experiment, simulation, work_item, asset_collection, dry_run, wait,
- include_assets, verbose, json, simulation_prefix_format_str, work_item_prefix_format_str, tag, name, id_file, id_filename
+ ctx: click.Context, pattern, exclude_pattern, experiment, simulation, work_item, asset_collection, dry_run,
+ wait,
+ include_assets, verbose, json, simulation_prefix_format_str, work_item_prefix_format_str, tag, name,
+ id_file, id_filename
):
- if id_file:
- if id_filename is None:
- raise ValueError("--id-filename is required when filename is not provided")
+
+ if id_file and id_filename is None:
+ user_logger.error("--id-filename is required when filename is not provided")
+ sys.exit(-1)
if json:
os.environ['IDMTOOLS_SUPPRESS_OUTPUT'] = '1'
+ os.environ['IDMTOOLS_DISABLE_PROGRESS_BAR'] = '1'
p: COMPSPlatform = Platform(ctx.obj['config_block'])
ao = AssetizeOutput()
@@ -256,6 +295,67 @@ def assetize_outputs(
ao.fetch_error()
sys.exit(-1)
+ @comps.command()
+ @click.argument('requirement', type=click.Path(exists=True), required=False)
+ @click.option('--asset_tag', multiple=True, help="Tag to be added to AC. Format: 'key:value'")
+ @click.option('--pkg', multiple=True, help="Package for override. Format: 'key==value'")
+ @click.option('--wheel', multiple=True, help="Local wheel file")
+ @click.pass_context
+ def req2ac(ctx: click.Context, requirement: str = None, asset_tag: Optional[List[str]] = None,
+ pkg: Optional[List[str]] = None,
+ wheel: Optional[List[str]] = None):
+ """
+ \b
+ Create an asset collection (AC) from a requirements file
+ Args:
+ requirement: path to the requirements file
+ asset_tag: tag to be added to the AC
+ pkg: package name (along with version)
+ wheel: package wheel file
+ """
+ from idmtools_platform_comps.utils.python_requirements_ac.requirements_to_asset_collection import \
+ RequirementsToAssetCollection
+
+ pkg_list = list(pkg)
+ wheel_list = [os.path.abspath(w) for w in wheel]
+ tags = dict()
+ for t in asset_tag:
+ parts = t.split(':')
+ tags[parts[0]] = parts[1]
+
+ p: COMPSPlatform = Platform(ctx.obj['config_block'])
+ pl = RequirementsToAssetCollection(p, requirements_path=requirement, pkg_list=pkg_list,
+ local_wheels=wheel_list, asset_tags=tags)
+ ac_id = pl.run()
+ print(ac_id)
+
+ @comps.command()
+ @click.argument('requirement', type=click.Path(exists=True), required=False)
+ @click.option('--pkg', multiple=True, help="Package used for override. Format: say, 'key==value'")
+ @click.option('--wheel', multiple=True, help="Local wheel file")
+ @click.pass_context
+ def ac_exist(ctx: click.Context, requirement: str = None, pkg: Optional[List[str]] = None,
+ wheel: Optional[List[str]] = None):
+ """
+ \b
+ Check whether an asset collection (AC) already exists for a requirements file
+ Args:
+ requirement: path to the requirements file
+ pkg: package name (along with version)
+ wheel: package wheel file
+ """
+ from idmtools_platform_comps.utils.python_requirements_ac.requirements_to_asset_collection import \
+ RequirementsToAssetCollection
+
+ pkg_list = list(pkg)
+ wheel_list = [os.path.abspath(w) for w in wheel]
+ p: COMPSPlatform = Platform(ctx.obj['config_block'])
+ pl = RequirementsToAssetCollection(p, requirements_path=requirement, pkg_list=pkg_list, local_wheels=wheel_list)
+ # Check if ac with md5 exists
+ ac = pl.retrieve_ac_by_tag()
+ if ac:
+ print("AC exist: ", ac.id)
+ else:
+ print("AC doesn't exist")
+
@comps.group(help="Singularity commands")
def singularity():
pass
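Illustrative invocations of the two new commands (block name and package pin are placeholders, and the exact CLI shape is an assumption):

# build an asset collection from a requirements file and tag it
idmtools comps MY_CONFIG_BLOCK req2ac requirements.txt --pkg "numpy==1.19.5" --asset_tag "project:demo"

# check whether an equivalent asset collection already exists
idmtools comps MY_CONFIG_BLOCK ac_exist requirements.txt --pkg "numpy==1.19.5"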
@@ -267,18 +367,26 @@ def singularity():
@click.option('--transient-input-glob', default=[], multiple=True, help="Transient Files Glob Patterns")
@click.argument('definition_file')
@click.option('--wait/--no-wait', default=True, help="Wait on item to finish")
- @click.option('--tag', default=[], type=(str, str), multiple=True, help="Extra Tags as Value Pairs for the Resulting AC")
- @click.option('--workitem-tag', default=[], type=(str, str), multiple=True, help="Extra Tags as Value Pairs for the WorkItem")
+ @click.option('--tag', default=[], type=(str, str), multiple=True,
+ help="Extra Tags as Value Pairs for the Resulting AC")
+ @click.option('--workitem-tag', default=[], type=(str, str), multiple=True,
+ help="Extra Tags as Value Pairs for the WorkItem")
@click.option('--name', default=None, help="Name of WorkItem. If not provided, one will be generated")
@click.option('--force/--no-force', default=False, help="Force build, ignoring build context")
@click.option('--image-name', default=None, help="Name of resulting image")
- @click.option('--id-file/--no-id-file', default=True, help="Enable or disable writing out an ID file that points to the created asset collection")
- @click.option('--id-filename', default=None, help="Name of ID file to save build as. If not specified, and id-file is enabled, a name is calculated")
- @click.option('--id-workitem/--no-id-workitem', default=True, help="Enable or disable writing out an id file for the workitem")
- @click.option('--id-workitem-failed/--no-id-workitem-failed', default=False, help="Write id of the workitem even if it failed. You need to enable --id-workitem for this is be active")
- @click.option('--id-workitem-filename', default=None, help="Name of ID file to save workitem to. You need to enable --id-workitem for this is be active")
+ @click.option('--id-file/--no-id-file', default=True,
+ help="Enable or disable writing out an ID file that points to the created asset collection")
+ @click.option('--id-filename', default=None,
+ help="Name of ID file to save build as. If not specified, and id-file is enabled, a name is calculated")
+ @click.option('--id-workitem/--no-id-workitem', default=True,
+ help="Enable or disable writing out an id file for the workitem")
+ @click.option('--id-workitem-failed/--no-id-workitem-failed', default=False,
+ help="Write id of the workitem even if it failed. You need to enable --id-workitem for this is be active")
+ @click.option('--id-workitem-filename', default=None,
+ help="Name of ID file to save workitem to. You need to enable --id-workitem for this is be active")
@click.pass_context
- def build(ctx: click.Context, common_input, common_input_glob, transient_input, transient_input_glob, definition_file, wait, tag, workitem_tag, name, force, image_name: str,
+ def build(ctx: click.Context, common_input, common_input_glob, transient_input, transient_input_glob,
+ definition_file, wait, tag, workitem_tag, name, force, image_name: str,
id_file: str, id_filename: str, id_workitem: bool, id_workitem_failed: bool, id_workitem_filename: str):
p: COMPSPlatform = Platform(ctx.obj['config_block'])
sb = SingularityBuildWorkItem(definition_file=definition_file, name=name, force=force, image_name=image_name)
@@ -311,7 +419,8 @@ def build(ctx: click.Context, common_input, common_input_glob, transient_input,
if id_workitem:
# TODO when we should use platform id but that need to be updated through the code base
if sb.succeeded and sb._uid is None:
- user_logger.warning("Cannot save workitem id because an existing container was found with the same inputs. You can force run using --force, but it is recommended to use the container used.")
+ user_logger.warning(
+ "Cannot save workitem id because an existing container was found with the same inputs. You can force run using --force, but it is recommended to use the container used.")
elif id_workitem_failed or sb.succeeded:
if id_workitem_filename is None:
id_workitem_filename = sb.get_id_filename(prefix="builder.")
@@ -322,15 +431,26 @@ def build(ctx: click.Context, common_input, common_input_glob, transient_input,
@singularity.command(help="Pull Singularity Image")
@click.argument('image_url')
@click.option('--wait/--no-wait', default=True, help="Wait on item to finish")
- @click.option('--tag', default=[], type=(str, str), multiple=True, help="Extra Tags as Value Pairs for the Resulting AC")
- @click.option('--workitem-tag', default=[], type=(str, str), multiple=True, help="Extra Tags as Value Pairs for the WorkItem")
+ @click.option('--tag', default=[], type=(str, str), multiple=True,
+ help="Extra Tags as Value Pairs for the Resulting AC")
+ @click.option('--workitem-tag', default=[], type=(str, str), multiple=True,
+ help="Extra Tags as Value Pairs for the WorkItem")
@click.option('--name', default=None, help="Name of WorkItem. If not provided, one will be generated")
@click.option('--force/--no-force', default=False, help="Force build, ignoring build context")
@click.option('--image-name', default=None, help="Name of resulting image")
@click.option('--id-file/--no-id-file', default=True, help="Enable or disable writing out an id file")
- @click.option('--id-filename', default=None, help="Name of ID file to save build as. If not specified, and id-file is enabled, a name is calculated")
+ @click.option('--id-filename', default=None,
+ help="Name of ID file to save build as. If not specified, and id-file is enabled, a name is calculated")
+ @click.option('--id-workitem/--no-id-workitem', default=True,
+ help="Enable or disable writing out an id file for the workitem")
+ @click.option('--id-workitem-failed/--no-id-workitem-failed', default=False,
+ help="Write id of the workitem even if it failed. You need to enable --id-workitem for this is be active")
+ @click.option('--id-workitem-filename', default=None,
+ help="Name of ID file to save workitem to. You need to enable --id-workitem for this is be active")
@click.pass_context
- def pull(ctx: click.Context, image_url, wait, tag, workitem_tag, name, force, image_name: str, id_file: str, id_filename: str):
+ def pull(ctx: click.Context, image_url, wait, tag, workitem_tag, name, force, image_name: str, id_file: str,
+ id_filename: str,
+ id_workitem: bool, id_workitem_failed: bool, id_workitem_filename: str):
p: COMPSPlatform = Platform(ctx.obj['config_block'])
sb = SingularityBuildWorkItem(image_url=image_url, force=force, image_name=image_name)
sb.name = f"Pulling {image_url}" if name is None else name
@@ -345,9 +465,21 @@ def pull(ctx: click.Context, image_url, wait, tag, workitem_tag, name, force, im
sb.run(wait_until_done=wait, platform=p)
if sb.succeeded and id_file:
+ if id_filename is None:
+ id_filename = sb.get_id_filename()
user_logger.info(f"Saving ID to {id_filename}")
- sb.to_id_file(id_filename, save_platform=True)
+ sb.asset_collection.to_id_file(id_filename, save_platform=True)
+
+ if id_workitem and sb.succeeded and sb._uid is None:
+ user_logger.warning(
+ "Cannot save workitem id because an existing container was found with the same inputs. You can force run using --force, but it is recommended to use the container used.")
+ elif id_workitem_failed or sb.succeeded:
+ if id_workitem_filename is None:
+ id_workitem_filename = sb.get_id_filename(prefix="builder.")
+ user_logger.info(f"Saving the Builder Workitem ID that contains the image to {id_workitem_filename}")
+ sb.to_id_file(id_workitem_filename, save_platform=True)
sys.exit(0 if sb.succeeded else -1)
except ImportError as e:
- logger.warning(f"COMPS CLI not enabled because a dependency is missing. Most likely it is either click or idmtools cli {e.args}")
+ logger.warning(
+ f"COMPS CLI not enabled because a dependency is missing. Most likely it is either click or idmtools cli {e.args}")
diff --git a/idmtools_platform_comps/idmtools_platform_comps/comps_operations/experiment_operations.py b/idmtools_platform_comps/idmtools_platform_comps/comps_operations/experiment_operations.py
index 37e108ab6..c10d40716 100644
--- a/idmtools_platform_comps/idmtools_platform_comps/comps_operations/experiment_operations.py
+++ b/idmtools_platform_comps/idmtools_platform_comps/comps_operations/experiment_operations.py
@@ -110,23 +110,23 @@ def platform_create(self, experiment: Experiment, num_cores: Optional[int] = Non
if use_short_path:
logger.debug("Setting Simulation Root to $COMPS_PATH(USER)")
simulation_root = "$COMPS_PATH(USER)"
- subdirectory = 'rac' + '_' + timestamp() # also shorten subdirectory
+ subdirectory = 'rac' + '_' + timestamp() # also shorten subdirectory
else:
simulation_root = self.platform.simulation_root
# Get the experiment command line
exp_command: CommandLine = self._get_experiment_command_line(check_command, experiment)
- if command_arg is None:
+ if command_arg is None and exp_command is not None:
command_arg = exp_command.arguments + " " + exp_command.options
- if executable_path is None:
+ if executable_path is None and exp_command is not None:
executable_path = exp_command.executable
# create initial configuration object
comps_config = dict(
environment_name=self.platform.environment,
- simulation_input_args=command_arg.strip(),
+ simulation_input_args=command_arg.strip() if command_arg is not None else None,
working_directory_root=os.path.join(simulation_root, subdirectory).replace('\\', '/'),
executable_path=executable_path,
node_group_name=self.platform.node_group,
@@ -137,8 +137,17 @@ def platform_create(self, experiment: Experiment, num_cores: Optional[int] = Non
exclusive=self.platform.exclusive
)
+ if kwargs.get("scheduling", False):
+ import copy
+ # save a copy of default config
+ setattr(self.platform, 'comps_config', copy.deepcopy(comps_config))
+ # clear some not-supported parameters
+ comps_config.update(executable_path=None, node_group_name=None, min_cores=None, max_cores=None,
+ exclusive=None, simulation_input_args=None)
+
if logger.isEnabledFor(DEBUG):
logger.debug(f'COMPS Experiment Configs: {str(comps_config)}')
+
config = Configuration(**comps_config)
e = COMPSExperiment(name=experiment.name,
@@ -159,7 +168,8 @@ def platform_create(self, experiment: Experiment, num_cores: Optional[int] = Non
self.send_assets(experiment)
return e
- def platform_modify_experiment(self, experiment: Experiment, regather_common_assets: bool = False, **kwargs) -> Experiment:
+ def platform_modify_experiment(self, experiment: Experiment, regather_common_assets: bool = False,
+ **kwargs) -> Experiment:
"""
Executed when an Experiment being run is already in the Created, Done, In Progress, or Failed state
Args:
@@ -173,7 +183,8 @@ def platform_modify_experiment(self, experiment: Experiment, regather_common_ass
experiment.pre_creation(self.platform, gather_assets=regather_common_assets)
self.send_assets(experiment)
else:
- user_logger.warning(f"Not gathering common assets again since experiment exists on platform. If you need to add additional common assets, see {get_doc_base_url()}cookbook/asset_collections.html#modifying-asset-collection")
+ user_logger.warning(
+ f"Not gathering common assets again since experiment exists on platform. If you need to add additional common assets, see {get_doc_base_url()}cookbook/asset_collections.html#modifying-asset-collection")
return experiment
def _get_experiment_command_line(self, check_command: bool, experiment: Experiment) -> CommandLine:
@@ -188,6 +199,7 @@ def _get_experiment_command_line(self, check_command: bool, experiment: Experime
Command line for Experiment
"""
from idmtools_platform_comps.utils.python_version import platform_task_hooks
+
if isinstance(experiment.simulations, Generator):
if logger.isEnabledFor(DEBUG):
logger.debug("Simulations generator detected. Copying generator and using first task as command")
@@ -199,7 +211,8 @@ def _get_experiment_command_line(self, check_command: bool, experiment: Experime
# run pre-creation in case task use it to produce the command line dynamically
task.pre_creation(sim, self.platform)
exp_command = task.command
- elif isinstance(experiment.simulations, ExperimentParentIterator) and isinstance(experiment.simulations.items, TemplatedSimulations):
+ elif isinstance(experiment.simulations, ExperimentParentIterator) and isinstance(experiment.simulations.items,
+ TemplatedSimulations):
if logger.isEnabledFor(DEBUG):
logger.debug("ParentIterator/TemplatedSimulations detected. Using base_task for command")
from idmtools.entities.simulation import Simulation
@@ -289,7 +302,8 @@ def platform_run_item(self, experiment: Experiment, **kwargs):
logger.debug(f'Commissioning experiment: {experiment.uid}')
# commission only if rules we have items in created or none.
# TODO add new status to entity status to track commissioned as well instead of raw comps
- if any([s.status in [None, EntityStatus.CREATED] for s in experiment.simulations]) and any([s.get_platform_object().state in [SimulationState.Created] for s in experiment.simulations]):
+ if any([s.status in [None, EntityStatus.CREATED] for s in experiment.simulations]) and any(
+ [s.get_platform_object().state in [SimulationState.Created] for s in experiment.simulations]):
po = experiment.get_platform_object()
po.commission()
# for now, we update here in the comps objects to reflect the new state
@@ -312,8 +326,8 @@ def send_assets(self, experiment: Experiment, **kwargs):
if experiment.assets.count == 0:
logger.warning('Experiment has no assets to send')
return
-
ac = self.platform._assets.create(experiment.assets)
+
if logger.isEnabledFor(DEBUG):
logger.debug(f'Asset collection for experiment: {experiment.id} is: {ac.id}')
diff --git a/idmtools_platform_comps/idmtools_platform_comps/comps_operations/simulation_operations.py b/idmtools_platform_comps/idmtools_platform_comps/comps_operations/simulation_operations.py
index a4a921de8..80e718d30 100644
--- a/idmtools_platform_comps/idmtools_platform_comps/comps_operations/simulation_operations.py
+++ b/idmtools_platform_comps/idmtools_platform_comps/comps_operations/simulation_operations.py
@@ -24,6 +24,7 @@
from idmtools.entities.simulation import Simulation
from idmtools.utils.json import IDMJSONEncoder
from idmtools_platform_comps.utils.general import convert_comps_status, get_asset_for_comps_item, clean_experiment_name
+from idmtools_platform_comps.utils.scheduling import scheduled
if TYPE_CHECKING: # pragma: no cover
from idmtools_platform_comps.comps_platform import COMPSPlatform
@@ -175,6 +176,7 @@ def to_comps_sim(self, simulation: Simulation, num_cores: int = None, priority:
kwargs['asset_collection_id'] = asset_collection_id
kwargs.update(simulation._platform_kwargs)
config = self.get_simulation_config_from_simulation(simulation, **kwargs)
+
if simulation.name:
simulation.name = clean_experiment_name(simulation.name)
s = COMPSSimulation(
@@ -183,7 +185,7 @@ def to_comps_sim(self, simulation: Simulation, num_cores: int = None, priority:
configuration=config
)
- self.send_assets(simulation, s)
+ self.send_assets(simulation, s, **kwargs)
s.set_tags(simulation.tags)
simulation._platform_object = s
return s
@@ -204,11 +206,20 @@ def get_simulation_config_from_simulation(self, simulation: Simulation, num_core
Returns:
Configuration
"""
+ global_scheduling = kwargs.get("scheduling", False)
+ sim_scheduling = getattr(simulation, 'scheduling', False)
+ scheduling = global_scheduling and sim_scheduling
+
comps_configuration = dict()
+ if global_scheduling:
+ config = getattr(self.platform, 'comps_config', {})
+ comps_exp_config = Configuration(**config)
+ else:
+ comps_exp: COMPSExperiment = simulation.parent.get_platform_object()
+ comps_exp_config: Configuration = comps_exp.configuration
+
if asset_collection_id:
comps_configuration['asset_collection_id'] = asset_collection_id
- comps_exp: COMPSExperiment = simulation.parent.get_platform_object()
- comps_exp_config: Configuration = comps_exp.configuration
if num_cores is not None and num_cores != comps_exp_config.max_cores:
logger.info(f'Overriding cores for sim to {num_cores}')
comps_configuration['max_cores'] = num_cores
@@ -228,6 +239,10 @@ def get_simulation_config_from_simulation(self, simulation: Simulation, num_core
comps_configuration['simulation_input_args'] = sim_task
if logger.isEnabledFor(DEBUG):
logger.debug(f'Simulation config: {str(comps_configuration)}')
+ if scheduling:
+ comps_configuration.update(executable_path=None, node_group_name=None, min_cores=None, max_cores=None,
+ exclusive=None, simulation_input_args=None)
+
return Configuration(**comps_configuration)
def batch_create(self, simulations: List[Simulation], num_cores: int = None, priority: str = None, asset_collection_id: Union[str, UUID] = None, **kwargs) -> \
@@ -298,10 +313,15 @@ def send_assets(self, simulation: Simulation, comps_sim: Optional[COMPSSimulatio
Returns:
None
"""
+ scheduling = kwargs.get("scheduling", False) and scheduled(simulation)
+
if comps_sim is None:
comps_sim = simulation.get_platform_object()
for asset in simulation.assets:
- comps_sim.add_file(simulationfile=SimulationFile(asset.filename, 'input'), data=asset.bytes)
+ if asset.filename.lower() == 'workorder.json' and scheduling:
+ comps_sim.add_file(simulationfile=SimulationFile(asset.filename, 'WorkOrder'), data=asset.bytes)
+ else:
+ comps_sim.add_file(simulationfile=SimulationFile(asset.filename, 'input'), data=asset.bytes)
# add metadata
if add_metadata:
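Net effect of the `send_assets` change above: WorkOrder.json is uploaded with the `WorkOrder` file type only when scheduling is active for the simulation. A minimal sketch of that routing (the helper name is illustrative; `Asset` is the real idmtools class):

```python
from idmtools.assets import Asset

def comps_file_type(asset: Asset, scheduling: bool) -> str:
    # WorkOrder.json is special-cased so COMPS can schedule the simulation from it
    if scheduling and asset.filename.lower() == 'workorder.json':
        return 'WorkOrder'
    return 'input'

assert comps_file_type(Asset(filename='WorkOrder.json', content='{}'), True) == 'WorkOrder'
assert comps_file_type(Asset(filename='model.py', content='pass'), True) == 'input'
```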
diff --git a/idmtools_platform_comps/idmtools_platform_comps/comps_platform.py b/idmtools_platform_comps/idmtools_platform_comps/comps_platform.py
index adaabd06f..29bbeffd3 100644
--- a/idmtools_platform_comps/idmtools_platform_comps/comps_platform.py
+++ b/idmtools_platform_comps/idmtools_platform_comps/comps_platform.py
@@ -113,3 +113,6 @@ def is_windows_platform(self, item: IEntity = None) -> bool:
if isinstance(item, IWorkflowItem):
return False
return super().is_windows_platform(item)
+
+
+
diff --git a/idmtools_platform_comps/idmtools_platform_comps/utils/download/download.py b/idmtools_platform_comps/idmtools_platform_comps/utils/download/download.py
index 5177c6c4a..f6353a449 100644
--- a/idmtools_platform_comps/idmtools_platform_comps/utils/download/download.py
+++ b/idmtools_platform_comps/idmtools_platform_comps/utils/download/download.py
@@ -8,6 +8,8 @@
from uuid import UUID
from COMPS.Data import WorkItem
from tqdm import tqdm
+
+from idmtools import IdmConfigParser
from idmtools.assets.file_list import FileList
from idmtools.core import EntityStatus
from idmtools.entities.iplatform import IPlatform
@@ -84,16 +86,18 @@ def wait(self, wait_on_done_progress: bool = True, timeout: int = None, refresh_
if self._uid:
oi = po.retrieve_output_file_info([self.zip_name])
zip_name = PurePath(self.output_path).joinpath(self.zip_name)
- with tqdm(total=oi[0].length, unit='B', unit_scale=True, unit_divisor=1024) as pbar:
+ with tqdm(total=oi[0].length, unit='B', unit_scale=True, unit_divisor=1024, desc="Downloading Files") as pbar:
self.__download_file(oi, pbar, zip_name)
if self.extract_after_download:
self.__extract_output(zip_name)
if self.delete_after_download:
if self.extract_after_download:
- user_logger.debug(f"Removing {zip_name}")
+ if IdmConfigParser.is_output_enabled():
+ user_logger.debug(f"Removing {zip_name}")
os.remove(zip_name)
- user_logger.debug(f'Deleting workitem {self.uid}')
+ if IdmConfigParser.is_output_enabled():
+ user_logger.debug(f'Deleting workitem {self.uid}')
po.delete()
self.uid = None
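The `IdmConfigParser.is_output_enabled()` gating above silences user-facing messages when idmtools output is suppressed; a small sketch of the same pattern (the message text is an example):

```python
from logging import getLogger
from idmtools import IdmConfigParser

user_logger = getLogger('user')

def debug_if_output_enabled(message: str):
    # emit user-facing debug lines only when idmtools output is enabled
    if IdmConfigParser.is_output_enabled():
        user_logger.debug(message)

debug_if_output_enabled('Removing downloaded archive')
```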
diff --git a/idmtools_platform_comps/idmtools_platform_comps/utils/package_version.py b/idmtools_platform_comps/idmtools_platform_comps/utils/package_version.py
index 188006fc9..17d3f1ad3 100644
--- a/idmtools_platform_comps/idmtools_platform_comps/utils/package_version.py
+++ b/idmtools_platform_comps/idmtools_platform_comps/utils/package_version.py
@@ -1,10 +1,11 @@
import functools
import operator
import json
import re
from abc import ABC
from datetime import datetime
-from logging import getLogger
+from logging import getLogger, DEBUG
from typing import Optional, List, Type
from urllib import request
import requests
@@ -12,6 +13,7 @@
from packaging.version import parse
from html.parser import HTMLParser
+PKG_PYPI = 'https://pypi.python.org/pypi/{}/json'
PYPI_PRODUCTION_SIMPLE = 'https://packages.idmod.org/artifactory/api/pypi/pypi-production/simple'
IDM_DOCKER_PROD = 'https://packages.idmod.org/artifactory/list/docker-production'
@@ -213,6 +215,35 @@ def fetch_versions_from_server(pkg_url: str, parser: Type[PackageHTMLParser] = L
return all_releases
+def fetch_versions_from_artifactory(pkg_name: str, parser: Type[PackageHTMLParser] = LinkHTMLParser) -> List[str]:
+ """
+ Fetch all versions from server
+
+ Args:
+ pkg_url: Url to fetch
+ parser: Parser tp use
+ Returns:
+
+ """
+    pkg_path = IDM_DOCKER_PROD
+    pkg_url = "/".join([pkg_path, pkg_name])  # avoid os.path.join, which breaks URLs on Windows
+
+ resp = requests.get(pkg_url)
+ if resp.status_code != 200:
+ logger.warning('Could not fetch URL')
+ return None
+
+ html_str = resp.text
+
+ parser = parser()
+ parser.feed(html_str)
+ releases = parser.pkg_version
+ releases = [v for v in releases if not v.startswith('.')]
+
+ all_releases = sorted(releases, key=parse_version, reverse=True)
+ return all_releases
+
+
@functools.lru_cache(3)
def get_versions_from_site(pkg_url, base_version: Optional[str] = None, display_all=False, parser: Type[PackageHTMLParser] = LinkNameParser, exclude_pre_release: bool = True):
"""
@@ -262,13 +293,176 @@ def get_latest_version_from_site(pkg_url, base_version: Optional[str] = None, di
Returns: the latest version of ven package
"""
+ if logger.isEnabledFor(DEBUG):
+ logger.debug(f"Fetching version from {pkg_url} with base {base_version}")
release_versions = get_versions_from_site(pkg_url, base_version, display_all=display_all, parser=parser, exclude_pre_release=exclude_pre_release)
if base_version:
# only use the longest match latest
version_compatible_portion = ".".join(base_version.split(".")[:2])
+ if logger.isEnabledFor(DEBUG):
+ logger.debug(f"Finding latest of matches for version {base_version} from {release_versions} using {version_compatible_portion}")
for ver in release_versions:
if ".".join(ver.split('.')[:2]) == version_compatible_portion:
return ver
return None
return release_versions[0] if release_versions else None
+
+
+def fetch_package_versions_from_pypi(pkg_name):
+ """
+    Utility to fetch all release versions for a given package name from PyPI
+    Args:
+        pkg_name: package name given
+    Returns: dict of releases for the package, or None if the lookup fails
+ """
+ url = PKG_PYPI.format(pkg_name)
+ try:
+ releases = json.loads(request.urlopen(url).read())['releases']
+ except Exception:
+ return None
+
+ return releases
+
+
+def fetch_package_versions(pkg_name, is_released=True, sort=True, display_all=False):
+ """
+    Utility to fetch all versions for a given package name
+    Args:
+        pkg_name: package name given
+        is_released: keep released versions only
+        sort: sort the versions or not
+        display_all: if True, print all package releases
+    Returns: the list of package versions
+ """
+
+ # First fetch versions from Artifactory
+ pkg_url = "/".join([PYPI_PRODUCTION_SIMPLE, pkg_name])
+ versions = fetch_versions_from_server(pkg_url, parser=LinkNameParser)
+
+ if versions is None:
+ versions = fetch_package_versions_from_pypi(pkg_name)
+
+ if sort:
+ versions = sorted(versions, key=parse_version, reverse=True)
+
+ if is_released:
+ versions = [ver for ver in versions if not parse(ver).is_prerelease]
+
+ if display_all:
+        print(versions)
+
+ return versions
+
+
+def get_pkg_match_version(pkg_name, base_version=None, test='==', validate=True):
+ """
+    Utility to get the version of a package that matches a version specifier
+    Args:
+        pkg_name: package name given
+        base_version: Optional base version to match against
+        test: default ==, the specifier operator used to find the version
+        validate: bool, if True, will validate base_version
+    Returns: the version of the package matching the specifier
+ """
+ # fetch sorted versions
+ versions = fetch_package_versions(pkg_name)
+
+ # Return None if given version list is None or empty
+ if not versions:
+ return None
+
+ # Return the latest version if no base_version is given
+ if base_version is None:
+ return versions[0]
+
+ # Make sure the input is valid
+ if base_version not in versions:
+ if validate:
+ # print(f"Could not find the version of '{version}'.")
+ raise Exception(f"Could not find the version of '{base_version}'.")
+
+ if test == '~=':
+ return get_latest_compatible_version(pkg_name, base_version, versions)
+
+ if test == '==':
+ return base_version
+
+    # base_version can be absent here only when validate=False; bail out gracefully
+    if base_version not in versions:
+        return None
+
+    index = versions.index(base_version)
+
+    if test == '<':
+        # versions are sorted newest first, so the next entry is the closest lower version
+        return versions[index + 1] if index + 1 < len(versions) else None
+
+    if test == '<=':
+        return versions[index]
+
+    if test == '>':
+        return versions[0] if index > 0 else None
+
+ if test == '>=':
+ return versions[0]
+
+ if test == '!=':
+ return versions[0] if base_version != versions[0] else versions[1] if len(versions) > 1 else None
+
+ return base_version
+
+
+def get_latest_version(pkg_name):
+ """
+ Utility to get the latest version for a given package name
+    Args:
+        pkg_name: package name given
+    Returns: the latest version of the package
+ """
+ # Get sorted package versions
+ versions = fetch_package_versions(pkg_name)
+
+ if versions is None:
+ # print(f"Could not find the version of '{version}'.")
+ raise Exception(f"Could not find the package'{pkg_name}'.")
+
+ # Pick the latest
+ return versions[0]
+
+
+def get_latest_compatible_version(pkg_name, base_version=None, versions=None, validate=True):
+ """
+ Utility to get the latest compatible version from a given version list
+ Args:
+        pkg_name: package name given
+        base_version: Optional base version; the match shares its version root (everything up to the last dot)
+        versions: optional pre-fetched version list
+ validate: bool, if True, will validate base_version
+ Returns: the latest compatible version from versions
+ """
+ if versions is None:
+ versions = fetch_package_versions(pkg_name)
+
+ # Return None if given version list is None or empty
+ if not versions:
+ return None
+
+ # Return the latest version if no base_version is given
+ if base_version is None:
+ return versions[0]
+
+ # Cleanup
+ base_version = base_version.replace('+nightly', '')
+
+ # Make sure the input is valid
+ if base_version not in versions:
+ if validate:
+ # print(f"Could not find the version of '{version}'.")
+ raise Exception(f"Could not find the version of '{base_version}'.")
+
+    # Compute the version root, e.g. '0.7.' for '0.7.0'
+    v_root = base_version[0: base_version.rindex('.') + 1]
+
+    # Find all candidates sharing that root
+    candidates = [v for v in versions if v.startswith(v_root)]
+
+ # Pick the latest
+ return candidates[0]
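A short usage sketch for the new version helpers; 'astor' and the commented results are examples and depend on what the package index currently serves:

```python
from idmtools_platform_comps.utils.package_version import (
    fetch_package_versions, get_latest_version, get_pkg_match_version)

versions = fetch_package_versions('astor')   # sorted newest first, pre-releases filtered out
latest = get_latest_version('astor')         # first entry of the sorted list
compat = get_pkg_match_version('astor', base_version='0.7.0', test='~=')  # latest 0.7.x
print(versions[:3], latest, compat)
```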
diff --git a/idmtools_platform_comps/idmtools_platform_comps/utils/python_requirements_ac/create_asset_collection.py b/idmtools_platform_comps/idmtools_platform_comps/utils/python_requirements_ac/create_asset_collection.py
index 010d88fbc..3bd0f6210 100644
--- a/idmtools_platform_comps/idmtools_platform_comps/utils/python_requirements_ac/create_asset_collection.py
+++ b/idmtools_platform_comps/idmtools_platform_comps/utils/python_requirements_ac/create_asset_collection.py
@@ -24,7 +24,8 @@ def build_asset_file_list(prefix=LIBRARY_ROOT_PREFIX):
for root, _, filenames in os.walk(prefix):
for filename in filenames:
asset = AssetCollectionFile(file_name=os.path.basename(filename),
- relative_path=os.path.join("site-packages", root.replace(prefix, "").strip("/")).strip("/"),
+ relative_path=os.path.join("site-packages",
+ root.replace(prefix, "").strip("/")).strip("/"),
md5_checksum=calculate_md5(os.path.join(root, filename))
)
output.append(asset)
@@ -50,25 +51,21 @@ def get_first_simulation_of_experiment(exp_id):
def main(): # pragma: no cover
print(sys.argv)
- if len(sys.argv) < 4:
+ if len(sys.argv) < 3:
raise Exception(
- "The script needs to be called with `python '.\n{}".format(
+ "The script needs to be called with `python '.\n{}".format(
" ".join(sys.argv)))
# Get the experiments
exp_id = sys.argv[1]
print('exp_id: ', exp_id)
- # Get mds
- md5_str = sys.argv[2]
- print('md5_str: ', md5_str)
-
# Get endpoint
- endpoint = sys.argv[3]
+ endpoint = sys.argv[2]
print('endpoint: ', endpoint)
# Platform key
- os_target = sys.argv[4]
+ os_target = sys.argv[3]
print('os: ', os_target)
client = Client()
@@ -85,10 +82,25 @@ def main(): # pragma: no cover
# Output files
max_files = 10
- print('Display the first 10 files:\n', "\n".join([f"{a.relative_path}/{a.file_name}" for a in asset_files[0:max_files]]))
+ print('Display the first 10 files:\n',
+ "\n".join([f"{a.relative_path}/{a.file_name}" for a in asset_files[0:max_files]]))
+
+    # Retrieve the experiment's tags once
+    comps_exp = Experiment.get(exp_id, QueryCriteria().select_children('tags'))
+    exp_tags = comps_exp.tags
+
+    # Keep only user-supplied tags by filtering out the reserved ones
+    _reserved_tag = ['idmtools', 'task_type', MD5_KEY.format(os_target)]
+    user_tags = {key: value for key, value in exp_tags.items() if key not in _reserved_tag}
+
+ # Get md5_str
+ md5_str = exp_tags.get(MD5_KEY.format(os_target), None)
+ # Collect ac's tags
ac = AssetCollection()
tags = {MD5_KEY.format(os_target): md5_str}
+ tags.update(user_tags)
ac.set_tags(tags)
# Create asset collection
@@ -116,7 +128,8 @@ def main(): # pragma: no cover
ac2.add_asset(acf)
print("\n\n\n=====================\nUploading files not in comps: " + "\n".join(
- [f"{a.relative_path}/{a.file_name}" for a in ac2.assets if a.md5_checksum in missing_files or a.md5_checksum is None]))
+ [f"{a.relative_path}/{a.file_name}" for a in ac2.assets if
+ a.md5_checksum in missing_files or a.md5_checksum is None]))
sys.stdout.flush()
ac2.save()
diff --git a/idmtools_platform_comps/idmtools_platform_comps/utils/python_requirements_ac/requirements_to_asset_collection.py b/idmtools_platform_comps/idmtools_platform_comps/utils/python_requirements_ac/requirements_to_asset_collection.py
index c9e01f91b..54a940676 100644
--- a/idmtools_platform_comps/idmtools_platform_comps/utils/python_requirements_ac/requirements_to_asset_collection.py
+++ b/idmtools_platform_comps/idmtools_platform_comps/utils/python_requirements_ac/requirements_to_asset_collection.py
@@ -1,5 +1,5 @@
-import hashlib
import os
+import hashlib
from dataclasses import dataclass, field
from logging import getLogger, DEBUG
from typing import List
@@ -30,29 +30,26 @@ class RequirementsToAssetCollection:
pkg_list: list = field(default=None)
#: list of wheel files locally to upload and install
local_wheels: list = field(default=None)
+    #: User tags to apply to the created asset collection
+ asset_tags: dict = field(default=None)
#: Internal checksum to calculate unique requirements set has be ran before
_checksum: str = field(default=None, init=False)
#: Calculated requirements including versions
_requirements: List[str] = field(default=None, init=False)
#: Since requirements vary by os, target it on the platform as well
_os_target: str = field(default=None, init=False)
+ #: Reserved tags
+ __reserved_tag: list = field(default=None, init=False)
def __post_init__(self):
if not any([self.requirements_path, self.pkg_list, self.local_wheels]):
raise ValueError(
- "Impossible to proceed without either requirements path or with package list or local wheels!")
-
- if self.platform is None:
- # Try to detect platform
- from idmtools.core.context import get_current_platform
- p = get_current_platform()
- if p is not None:
- self.platform = p
+ "Impossible to proceed without either requirements path or package list or local wheels!")
self.requirements_path = os.path.abspath(self.requirements_path) if self.requirements_path else None
self.pkg_list = self.pkg_list or []
self.local_wheels = [os.path.abspath(whl) for whl in self.local_wheels] if self.local_wheels else []
- self._os_target = "win" if "slurm" not in self.platform.environment.lower() and self.platform.environment.lower() not in SLURM_ENVS else "linux"
+ self.asset_tags = self.asset_tags or {}
@property
def checksum(self):
@@ -66,6 +63,15 @@ def checksum(self):
return self._checksum
+ @property
+ def md5_tag(self):
+ """
+ Returns:
+ The md5 tag.
+ """
+ self.init_platform()
+ return {MD5_KEY.format(self._os_target): self.checksum}
+
@property
def requirements(self):
"""
@@ -77,6 +83,17 @@ def requirements(self):
return self._requirements
+ def init_platform(self):
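+        """Detect the platform if not already set, then derive the OS target and reserved tags."""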
+ if self.platform is None:
+ # Try to detect platform
+ from idmtools.core.context import get_current_platform
+ p = get_current_platform()
+ if p is not None:
+ self.platform = p
+
+ self._os_target = "win" if "slurm" not in self.platform.environment.lower() and self.platform.environment.lower() not in SLURM_ENVS else "linux"
+ self.__reserved_tag = ['idmtools', 'task_type', MD5_KEY.format(self._os_target)]
+
def run(self, rerun=False):
"""
The working logic of this utility:
@@ -86,6 +103,10 @@ def run(self, rerun=False):
Returns: return ac id based on the requirements if Experiment and WorkItem Succeeded
"""
+
+ # Late validation
+ self.init_platform()
+
# Check if ac with md5 exists
ac = self.retrieve_ac_by_tag()
@@ -124,7 +145,8 @@ def save_updated_requirements(self):
Returns:
"""
- user_logger.info(f"Creating an updated requirements file ensuring all versions are specified at {REQUIREMENT_FILE}")
+ user_logger.info(
+ f"Creating an updated requirements file ensuring all versions are specified at {REQUIREMENT_FILE}")
req_content = '\n'.join(self.requirements)
with open(REQUIREMENT_FILE, 'w') as outfile:
outfile.write(req_content)
@@ -136,13 +158,17 @@ def retrieve_ac_by_tag(self, md5_check=None):
md5_check: also can use custom md5 string as search tag
Returns: comps asset collection
"""
+ # Late validation
+ self.init_platform()
+
md5_str = md5_check or self.checksum
if logger.isEnabledFor(DEBUG):
logger.debug(f'md5_str: {md5_str}')
# check if ac with tag idmtools-requirements-md5 = my_md5 exists
ac_list = COMPSAssetCollection.get(
- query_criteria=QueryCriteria().select_children('tags').where_tag([f'{MD5_KEY.format(self._os_target)}={md5_str}']))
+ query_criteria=QueryCriteria().select_children('tags').where_tag(
+ [f'{MD5_KEY.format(self._os_target)}={md5_str}']))
# if exists, get ac and return it
if len(ac_list) > 0:
@@ -187,10 +213,22 @@ def run_experiment_to_install_lib(self):
task = JSONConfiguredPythonTask(script_path=os.path.join(CURRENT_DIRECTORY, MODEL_LOAD_LIB))
experiment = Experiment(name=exp_name, simulations=[task.to_simulation()])
experiment.add_asset(Asset(REQUIREMENT_FILE))
- experiment.tags = {MD5_KEY.format(self._os_target): self.checksum}
+ experiment.tags = self.md5_tag
+
+        # Avoid conflicts with reserved tags
+        if len(set(self.asset_tags).intersection(self.__reserved_tag)) > 0:
+            raise Exception(f"{self.__reserved_tag} are reserved tags, please use other tags!")
+
+        # Defensively drop reserved tags (a no-op when the check above passed)
+        for tag in self.__reserved_tag:
+            self.asset_tags.pop(tag, None)
+
+ # Update experiment's tags
+ experiment.tags.update(self.asset_tags)
+
self.add_wheels_to_assets(experiment)
user_logger.info("Run install of python requirements on COMPS. To view the details, see the experiment below")
- experiment.run(wait_until_done=True, platform=self.platform, use_short_path=True)
+ experiment.run(wait_until_done=True, platform=self.platform, use_short_path=True, num_cores=1)
if experiment.succeeded:
return experiment
@@ -209,11 +247,17 @@ def run_wi_to_create_ac(self, exp_id):
logger.debug(f'md5_str: {md5_str}')
wi_name = "wi to create ac"
- command = f"python3 {MODEL_CREATE_AC} {exp_id} {md5_str} {self.platform.endpoint} {self._os_target}"
+ command = f"python3 {MODEL_CREATE_AC} {exp_id} {self.platform.endpoint} {self._os_target}"
+
+ # Update tags
tags = {MD5_KEY.format(self._os_target): self.checksum}
+ tags.update(self.asset_tags)
- user_logger.info("Converting Python Packages to an Asset Collection. This may take some time for large dependency lists")
- wi = SSMTWorkItem(name=wi_name, command=command, transient_assets=AssetCollection([os.path.join(CURRENT_DIRECTORY, MODEL_CREATE_AC)]), tags=tags, related_experiments=[exp_id])
+ user_logger.info(
+ "Converting Python Packages to an Asset Collection. This may take some time for large dependency lists")
+ wi = SSMTWorkItem(name=wi_name, command=command,
+ transient_assets=AssetCollection([os.path.join(CURRENT_DIRECTORY, MODEL_CREATE_AC)]),
+ tags=tags, related_experiments=[exp_id])
wi.run(wait_on_done=True, platform=self.platform)
@@ -233,28 +277,6 @@ def run_wi_to_create_ac(self, exp_id):
except: # noqa: E722
pass
- @staticmethod
- def get_latest_version(pkg_name, display_all=False):
- """
- Utility to get the latest version for a given package name
- Args:
- pkg_name: package name given
- display_all: determine if output all package releases
- Returns: the latest version of ven package
- """
- from idmtools_platform_comps.utils.package_version import get_latest_package_version_from_pypi
- from idmtools_platform_comps.utils.package_version import get_latest_pypi_package_version_from_artifactory
-
- latest_version = get_latest_pypi_package_version_from_artifactory(pkg_name, display_all)
-
- if not latest_version:
- latest_version = get_latest_package_version_from_pypi(pkg_name, display_all)
-
- if not latest_version:
- raise Exception(f"Failed to retrieve the latest version of '{pkg_name}'.")
-
- return latest_version
-
def consolidate_requirements(self):
"""
Combine requirements and dynamic requirements (a list):
@@ -264,6 +286,7 @@ def consolidate_requirements(self):
Returns: the consolidated requirements (as a list)
"""
import pkg_resources
+ from idmtools_platform_comps.utils.package_version import get_pkg_match_version
req_dict = {}
comment_list = []
@@ -288,17 +311,20 @@ def consolidate_requirements(self):
req_dict[req.name] = req.specs
missing_version_dict = {k: v for k, v in req_dict.items() if len(v) == 0 or v[0][1] == ''}
- has_version_dict = {k: v for k, v in req_dict.items() if k not in missing_version_dict}
- update_req_list = []
- for k, v in has_version_dict.items():
- update_req_list.append(f'{k}=={v[0][1]}')
-
- for k in missing_version_dict.keys():
- latest = self.get_latest_version(k)
- update_req_list.append(f"{k}=={latest}")
+ req_list = []
+ for k, v in req_dict.items():
+ pkg_name = k
+ base_version = None if k in missing_version_dict else v[0][1]
+ test = '==' if k in missing_version_dict else v[0][0]
+ req_list.append(f'{pkg_name}=={get_pkg_match_version(pkg_name, base_version, test)}')
+ wheel_list = []
if self.local_wheels:
- update_req_list.extend([f"Assets/{os.path.basename(whl)}" for whl in self.local_wheels])
+ wheel_list.extend([f"Assets/{os.path.basename(whl)}" for whl in self.local_wheels])
+
+ req_list = sorted(req_list, reverse=False)
+ wheel_list = sorted(wheel_list, reverse=False)
+ update_req_list = req_list + wheel_list
return update_req_list
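A hypothetical end-to-end use of the new `asset_tags` field (the platform alias and package are examples; passing a reserved tag such as 'idmtools' raises an exception):

```python
from idmtools.core.platform_factory import Platform
from idmtools_platform_comps.utils.python_requirements_ac.requirements_to_asset_collection import (
    RequirementsToAssetCollection)

platform = Platform('SLURM2')
rta = RequirementsToAssetCollection(platform=platform,
                                    pkg_list=['astor~=0.7.0'],
                                    asset_tags={'project': 'demo'})
ac_id = rta.run()  # reuses an existing asset collection when the checksum tag matches
print(ac_id)
```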
diff --git a/idmtools_platform_comps/idmtools_platform_comps/utils/scheduling.py b/idmtools_platform_comps/idmtools_platform_comps/utils/scheduling.py
new file mode 100644
index 000000000..e0ee073ab
--- /dev/null
+++ b/idmtools_platform_comps/idmtools_platform_comps/utils/scheduling.py
@@ -0,0 +1,117 @@
+import json
+from os import PathLike
+from typing import List, Union
+from idmtools.assets import Asset
+from idmtools.entities.experiment import Experiment
+from idmtools.entities.simulation import Simulation
+from idmtools.entities.templated_simulation import TemplatedSimulations
+from logging import DEBUG
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+# Utility function to add an updated WorkOrder.json to each simulation as a linked file via the simulation task.
+# It first loads the original work order file from disk, then updates its Command field from each simulation's
+# simulation.task.command.cmd, and finally attaches the updated WorkOrder.json to the simulation.
+def default_add_workerorder_sweep_callback(simulation, file_name, file_path):
+ add_work_order(simulation, file_name=file_name, file_path=file_path)
+
+
+def default_add_schedule_config_sweep_callback(simulation, command: str = None,
+ node_group_name: str = 'idm_cd', num_cores: int = 1, **config_opts):
+ add_schedule_config(simulation, command=command, node_group_name=node_group_name, num_cores=num_cores,
+ **config_opts["config_opts"])
+
+
+def scheduled(simulation: Simulation):
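+    """Return True if a WorkOrder/scheduling config has already been attached to this simulation."""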
+ scheduling = getattr(simulation, 'scheduling', False)
+ return scheduling
+
+
+def _add_work_order_asset(_simulation: Simulation, _file_name: str = "WorkOrder.json",
+ _file_path: Union[str, PathLike] = "./WorkOrder.json", _update: bool = True):
+ if scheduled(_simulation):
+ return
+
+ with open(str(_file_path), "r") as jsonFile:
+ _config = json.loads(jsonFile.read())
+
+ if _update and len(_simulation.task.command.cmd) > 0:
+ _config["Command"] = _simulation.task.command.cmd
+
+ ctn = json.dumps(_config, indent=3)
+ _simulation.task.transient_assets.add_asset(Asset(filename=_file_name, content=ctn))
+ setattr(_simulation, 'scheduling', True)
+
+
+def add_work_order(item: Union[Experiment, Simulation, TemplatedSimulations], file_name: str = "WorkOrder.json",
+ file_path: Union[str, PathLike] = "./WorkOrder.json"):
+ if isinstance(item, Simulation):
+ _add_work_order_asset(item, _file_name=file_name, _file_path=file_path, _update=True)
+ elif isinstance(item, TemplatedSimulations):
+ _add_work_order_asset(item.base_simulation, _file_name=file_name, _file_path=file_path, _update=False)
+ elif isinstance(item, Experiment):
+ if isinstance(item.simulations.items, TemplatedSimulations):
+ if len(item.simulations.items) == 0:
+ raise ValueError("You cannot run an empty experiment")
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Using Base task from template for WorkOrder.json assets")
+ _add_work_order_asset(item.simulations.items.base_simulation, _file_name=file_name, _file_path=file_path,
+ _update=False)
+ for sim in item.simulations.items.extra_simulations():
+ _add_work_order_asset(sim, _file_name=file_name, _file_path=file_path, _update=True)
+ elif isinstance(item.simulations.items, List):
+ if len(item.simulations.items) == 0:
+ raise ValueError("You cannot run an empty experiment")
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Using all tasks to gather assets")
+ for sim in item.simulations.items:
+ _add_work_order_asset(sim, _file_name=file_name, _file_path=file_path, _update=True)
+ else:
+ raise ValueError("The method only support object type: Experiment, Simulation, TemplatedSimulations!")
+
+
+def _add_schedule_config_asset(_simulation: Simulation, _config: dict, _update: bool = True):
+ if scheduled(_simulation):
+ return
+
+ if _update and len(_simulation.task.command.cmd) > 0:
+ _config["Command"] = _simulation.task.command.cmd
+
+ ctn = json.dumps(_config, indent=3)
+ _simulation.task.transient_assets.add_asset(Asset(filename="WorkOrder.json", content=ctn))
+ setattr(_simulation, 'scheduling', True)
+
+
+def add_schedule_config(item: Union[Experiment, Simulation, TemplatedSimulations], command: str = None,
+ node_group_name: str = 'idm_cd', num_cores: int = 1, **config_opts):
+ config = dict(Command=command, NodeGroupName=node_group_name, NumCores=num_cores)
+ config.update(config_opts)
+
+ if isinstance(item, Simulation):
+ _add_schedule_config_asset(item, _config=config, _update=True)
+ elif isinstance(item, TemplatedSimulations):
+ _add_schedule_config_asset(item.base_simulation, _config=config, _update=False)
+ elif isinstance(item, Experiment):
+ if isinstance(item.simulations.items, TemplatedSimulations):
+ if len(item.simulations.items) == 0:
+ raise ValueError("You cannot run an empty experiment")
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Using Base task from template for WorkOrder.json assets")
+ _add_schedule_config_asset(item.simulations.items.base_simulation, _config=config, _update=False)
+ for sim in item.simulations.items.extra_simulations():
+ _add_schedule_config_asset(sim, _config=config, _update=True)
+ elif isinstance(item.simulations.items, List):
+ if len(item.simulations.items) == 0:
+ raise ValueError("You cannot run an empty experiment")
+ if logger.isEnabledFor(DEBUG):
+ logger.debug("Using all tasks to gather assets")
+ for sim in item.simulations.items:
+ _add_schedule_config_asset(sim, _config=config, _update=True)
+ else:
+ raise ValueError("The method only support object type: Experiment, Simulation, TemplatedSimulations!")
diff --git a/idmtools_platform_comps/idmtools_platform_comps/utils/singularity_build.py b/idmtools_platform_comps/idmtools_platform_comps/utils/singularity_build.py
index 5d04f2225..ef6f288c6 100644
--- a/idmtools_platform_comps/idmtools_platform_comps/utils/singularity_build.py
+++ b/idmtools_platform_comps/idmtools_platform_comps/utils/singularity_build.py
@@ -412,6 +412,8 @@ def wait(self, wait_on_done_progress: bool = True, timeout: int = None, refresh_
return None
def get_id_filename(self, prefix: str = None) -> str:
+ if prefix is None:
+ prefix = ''
if self.definition_file:
base_name = PurePath(self.definition_file).name.replace(".def", ".id")
if prefix:
diff --git a/idmtools_platform_comps/requirements.txt b/idmtools_platform_comps/requirements.txt
index 56c771d09..47035aa0e 100644
--- a/idmtools_platform_comps/requirements.txt
+++ b/idmtools_platform_comps/requirements.txt
@@ -1,4 +1,4 @@
-idmtools~=1.6.2
-jinja2~=2.11.2
+idmtools~=1.6.3
+jinja2~=2.11.3
pyCOMPS~=2.4.1
tqdm>=4.52.0,<5
\ No newline at end of file
diff --git a/idmtools_platform_comps/setup.py b/idmtools_platform_comps/setup.py
index 2fdace7e1..10db92cfb 100644
--- a/idmtools_platform_comps/setup.py
+++ b/idmtools_platform_comps/setup.py
@@ -54,5 +54,5 @@
""",
extras_require=extras,
url='https://github.com/InstituteforDiseaseModeling/idmtools',
- version='1.6.2'
+ version='1.6.3'
)
diff --git a/idmtools_platform_comps/ssmt_image/Dockerfile b/idmtools_platform_comps/ssmt_image/Dockerfile
index 0f754aed9..7edc7d32d 100644
--- a/idmtools_platform_comps/ssmt_image/Dockerfile
+++ b/idmtools_platform_comps/ssmt_image/Dockerfile
@@ -32,6 +32,9 @@ ADD pip.conf /etc/pip.conf
COPY .depends/* /tmp/
+RUN pip3 install -U pip && \
+ pip install pygit2
+
RUN bash -c "pip3 install /tmp/*.gz --index-url=https://packages.idmod.org/api/pypi/pypi-production/simple"
# make the PIP index configurable so we can build against staging, production, or a local PyPI server
diff --git a/idmtools_platform_comps/tests/inputs/WorkOrder.json b/idmtools_platform_comps/tests/inputs/WorkOrder.json
new file mode 100644
index 000000000..eba0037df
--- /dev/null
+++ b/idmtools_platform_comps/tests/inputs/WorkOrder.json
@@ -0,0 +1,11 @@
+{
+ "Command": "python3.6 --version",
+ "NodeGroupName": "idm_abcd",
+ "NumCores": 1,
+ "NumProcesses": 1,
+ "NumNodes": 1,
+ "Environment": {
+ "key1": "value1",
+ "key2:": "value2"
+ }
+}
\ No newline at end of file
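When `add_work_order` attaches a sample like the one above with `_update=True`, only the `Command` field is rewritten from the simulation's own command line; a sketch of that transformation (path and command are examples):

```python
import json

with open('WorkOrder.json') as fp:
    config = json.load(fp)
config['Command'] = 'python3 Assets/commandline_model.py 100'  # simulation.task.command.cmd
print(json.dumps(config, indent=3))
```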
diff --git a/idmtools_platform_comps/tests/inputs/simple_load_lib_example/fake_wheel_file_a.whl b/idmtools_platform_comps/tests/inputs/simple_load_lib_example/fake_wheel_file_a.whl
new file mode 100644
index 000000000..e69de29bb
diff --git a/idmtools_platform_comps/tests/inputs/simple_load_lib_example/fake_wheel_file_b.whl b/idmtools_platform_comps/tests/inputs/simple_load_lib_example/fake_wheel_file_b.whl
new file mode 100644
index 000000000..e69de29bb
diff --git a/idmtools_platform_comps/tests/inputs/simple_load_lib_example/requirements1.txt b/idmtools_platform_comps/tests/inputs/simple_load_lib_example/requirements1.txt
index 737195d40..197a3a7be 100644
--- a/idmtools_platform_comps/tests/inputs/simple_load_lib_example/requirements1.txt
+++ b/idmtools_platform_comps/tests/inputs/simple_load_lib_example/requirements1.txt
@@ -1 +1 @@
-pytest==5.4
\ No newline at end of file
+pytest==5.4.0
\ No newline at end of file
diff --git a/idmtools_platform_comps/tests/inputs/simple_load_lib_example/requirements3.txt b/idmtools_platform_comps/tests/inputs/simple_load_lib_example/requirements3.txt
index 354cabbf4..3c97388c3 100644
--- a/idmtools_platform_comps/tests/inputs/simple_load_lib_example/requirements3.txt
+++ b/idmtools_platform_comps/tests/inputs/simple_load_lib_example/requirements3.txt
@@ -1,2 +1,2 @@
-pytest==5.4
+pytest==5.4.0
seaborn~=0.7.1
\ No newline at end of file
diff --git a/idmtools_platform_comps/tests/inputs/workitems/ssmt/WorkOrder.json b/idmtools_platform_comps/tests/inputs/workitems/ssmt/WorkOrder.json
new file mode 100644
index 000000000..1b775702d
--- /dev/null
+++ b/idmtools_platform_comps/tests/inputs/workitems/ssmt/WorkOrder.json
@@ -0,0 +1,8 @@
+{
+ "WorkItem_Type": "DockerWorker",
+ "Execution": {
+ "ImageName": "docker-production.packages.idmod.org/nyu/dtk:20200306",
+ "Command": "/dtk/Eradication -v",
+ "DebugFileName": "debug.txt"
+ }
+}
\ No newline at end of file
diff --git a/idmtools_platform_comps/tests/test_cli.py b/idmtools_platform_comps/tests/test_cli.py
index 2f372799a..8e59f445f 100644
--- a/idmtools_platform_comps/tests/test_cli.py
+++ b/idmtools_platform_comps/tests/test_cli.py
@@ -21,13 +21,6 @@
@allure.story("CLI")
class TestCompsCLI(unittest.TestCase):
- @classmethod
- def setUpClass(cls) -> None:
- # To enable this, you need to also set the env var TEST_WITH_PACKAGES to t or y
- if TEST_WITH_NEW_CODE:
- # Run package dists
- run_package_dists()
-
@classmethod
def setUpClass(cls) -> None:
# Setup logging for cli
@@ -38,6 +31,11 @@ def setUpClass(cls) -> None:
if os.path.exists(pwd.joinpath("singularity.id")):
os.remove(pwd.joinpath("singularity.id"))
+ # To enable this, you need to also set the env var TEST_WITH_PACKAGES to t or y
+ if TEST_WITH_NEW_CODE:
+ # Run package dists
+ run_package_dists()
+
@classmethod
def tearDownClass(cls) -> None:
# Reset Logging
@@ -46,7 +44,16 @@ def tearDownClass(cls) -> None:
setup_logging(level=DEBUG, filename='idmtools.log', force=True)
def test_subcommands_exists(self):
+ result = run_command('--help')
+ print(result.stdout)
+ lines = get_subcommands_from_help_result(result)
+ if "Warning: could not load plugin. See `-c comps_subcommand" in result.stdout:
+ result2 = run_command('comps_subcommand', '--help')
+ print(result2.stdout)
+ with self.subTest("test_comps_subcommand"):
+ self.assertIn('comps', lines)
result = run_command('comps', '--help')
+ print(result.stdout)
lines = get_subcommands_from_help_result(result)
# ensure our command is in the options
with self.subTest("test_assetize_subcommand"):
@@ -55,6 +62,8 @@ def test_subcommands_exists(self):
self.assertIn('singularity', lines)
with self.subTest("test_login_subcommand"):
self.assertIn('login', lines)
+ with self.subTest("test_req2ac_subcommand"):
+ self.assertIn('req2ac', lines)
@allure.feature("AssetizeOutputs")
def test_assetize_dry_run_json(self):
@@ -71,8 +80,8 @@ def test_assetize_id(self):
if os.path.exists(op.joinpath(fn)):
os.remove(op.joinpath(fn))
result = run_command('comps', 'Bayesian', 'assetize-outputs', '--experiment', '9311af40-1337-ea11-a2be-f0921c167861', '--json', '--id-file', '--id-filename', fn, mix_stderr=False)
- self.assertTrue(result.exit_code == 0)
print(result.stdout)
+ self.assertTrue(result.exit_code == 0)
files = json.loads(result.stdout)
self.assertEqual(36, len(files))
self.assertTrue(os.path.exists(op.joinpath(fn)))
@@ -80,12 +89,15 @@ def test_assetize_id(self):
@allure.feature("AssetizeOutputs")
def test_cli_error(self):
result = run_command('comps', 'Bayesian', 'assetize-outputs', '--experiment', '9311af40-1337-ea11-a2be-f0921c167861', '--pattern', '34234234')
+ print(result.stdout)
self.assertTrue(result.exit_code == -1)
self.assertIn("No files found with patterns specified", result.stdout)
@allure.feature("Containers")
def test_container_pull(self):
result = run_command('comps', 'SLURM2', 'singularity', 'pull', 'docker://python:3.8.6', mix_stderr=False)
+ print(result.stdout)
+ print(result.stderr)
self.assertTrue(result.exit_code == 0)
self.assertTrue(os.path.exists("python_3.8.6.id"))
@@ -95,6 +107,7 @@ def test_container_build(self):
if os.path.exists(pwd.joinpath("singularity.id")):
os.remove(pwd.joinpath("singularity.id"))
result = run_command('comps', 'SLURM2', 'singularity', 'build', '--common-input-glob', str(pwd.joinpath('*.txt')), str(pwd.joinpath('singularity.def')), mix_stderr=False)
+ print(result.stdout)
self.assertTrue(result.exit_code == 0)
self.assertTrue(os.path.exists(pwd.joinpath("singularity.id")))
@@ -106,6 +119,7 @@ def test_container_build_force_and_workitem_id(self):
if os.path.exists(pwd.joinpath(file)):
os.remove(pwd.joinpath(file))
result = run_command('comps', 'SLURM2', 'singularity', 'build', '--force', '--id-workitem', '--common-input-glob', str(pwd.joinpath('*.txt')), str(pwd.joinpath('singularity.def')), mix_stderr=False)
+ print(result.stdout)
self.assertTrue(result.exit_code == 0)
for file in id_files:
self.assertTrue(os.path.exists(pwd.joinpath(file)))
diff --git a/idmtools_platform_comps/tests/test_comps_plugin.py b/idmtools_platform_comps/tests/test_comps_plugin.py
index 5d7c30f1a..3b2447d09 100644
--- a/idmtools_platform_comps/tests/test_comps_plugin.py
+++ b/idmtools_platform_comps/tests/test_comps_plugin.py
@@ -1,9 +1,8 @@
-from unittest import mock
-
import allure
import tempfile
import os
import unittest
+from unittest import mock
import dataclasses
import pytest
from idmtools import IdmConfigParser
@@ -108,11 +107,11 @@ def test_get_ssmt_versions(self):
prev_major = int(parts[0])
pre_minor = int(parts[1])
- #
+ @pytest.mark.serial
def test_get_next_ssmt_version(self):
test_versions = ['1.10.0.2', '1.10.0.1', '1.6.0.1', '1.5.1.7', '1.5.1.6', '1.5.0.2', '1.4.0.0', '1.3.0.0', '1.2.2.0', '1.2.0.0',
'1.1.0.2', '1.1.0.0', '1.0.1.0', '1.0.0', '1.0.0.0']
- with mock.patch('idmtools_platform_comps.utils.package_version.fetch_versions_from_server', return_value=test_versions) as mocK_fetch:
+        with mock.patch('idmtools_platform_comps.utils.package_version.get_versions_from_site', return_value=test_versions) as mock_fetch:
self.assertEqual(get_latest_ssmt_image_version_from_artifactory(base_version="1.10.0.0"), "1.10.0.2")
self.assertEqual(get_latest_ssmt_image_version_from_artifactory(base_version="1.5.0.1"), "1.5.1.7")
self.assertEqual(get_latest_ssmt_image_version_from_artifactory(base_version="1.5.1.1"), "1.5.1.7")
@@ -120,4 +119,4 @@ def test_get_next_ssmt_version(self):
self.assertEqual(get_latest_ssmt_image_version_from_artifactory(base_version="1.6.0.0"), "1.6.0.1")
self.assertEqual(get_latest_ssmt_image_version_from_artifactory(base_version="1.1.0.0"), "1.1.0.2")
self.assertEqual(get_latest_ssmt_image_version_from_artifactory(base_version="1.5.1+nightly.0"), "1.5.1.7")
- self.assertEqual(get_latest_ssmt_image_version_from_artifactory(base_version="1.6.0+nightly.0"), "1.6.0.1")
\ No newline at end of file
+ self.assertEqual(get_latest_ssmt_image_version_from_artifactory(base_version="1.6.0+nightly.0"), "1.6.0.1")
diff --git a/idmtools_platform_comps/tests/test_experiment_operations.py b/idmtools_platform_comps/tests/test_experiment_operations.py
index 72bbc9440..dd11e82b2 100644
--- a/idmtools_platform_comps/tests/test_experiment_operations.py
+++ b/idmtools_platform_comps/tests/test_experiment_operations.py
@@ -107,6 +107,7 @@ def test_no_assets(self):
self.assertEqual(0, idm_experiment.simulations[0].assets.count)
@allure.story("Assets")
+ @pytest.mark.serial
def test_list_assets(self):
"""
Test that the list assets with children
diff --git a/idmtools_platform_comps/tests/test_load_lib_and_wheel.py b/idmtools_platform_comps/tests/test_load_lib_and_wheel.py
index e7d21910f..b4a72ccad 100644
--- a/idmtools_platform_comps/tests/test_load_lib_and_wheel.py
+++ b/idmtools_platform_comps/tests/test_load_lib_and_wheel.py
@@ -91,6 +91,19 @@ def test_get_latest_package_idm(self):
self.assertIsInstance(package, list)
self.assertEqual(package[-1], '1.0.0')
+ @pytest.mark.long
+ @pytest.mark.comps
+ def test_requirements_to_assetcollection_ignores_platform_num_cores(self):
+ # ------------------------------------------------------
+ # First load 'zipp' package (note: comps does not have 'zipp' package)
+ # ------------------------------------------------------
+ requirements_path = os.path.join(model_path, 'requirements.txt')
+ pL = Platform('SLURM', num_cores=2)
+ rta = RequirementsToAssetCollection(platform=pL, requirements_path=requirements_path)
+ ac_id = rta.run(rerun=True)
+
+ self.assertIsNotNone(ac_id)
+
@pytest.mark.long
@pytest.mark.comps
def test_exp_with_load_zipp_lib(self):
diff --git a/idmtools_platform_comps/tests/test_package_version.py b/idmtools_platform_comps/tests/test_package_version.py
new file mode 100644
index 000000000..8b30adb22
--- /dev/null
+++ b/idmtools_platform_comps/tests/test_package_version.py
@@ -0,0 +1,220 @@
+import os
+import unittest
+import allure
+import pytest
+from unittest import mock
+from idmtools_test.utils.cli import run_command
+from idmtools.assets import AssetCollection
+from idmtools_platform_comps.utils.package_version import get_pkg_match_version, get_latest_version, \
+ fetch_package_versions
+from idmtools_test import COMMON_INPUT_PATH
+
+wheel_file_1 = os.path.join(COMMON_INPUT_PATH, 'simple_load_lib_example', 'fake_wheel_file_a.whl')
+wheel_file_2 = os.path.join(COMMON_INPUT_PATH, 'simple_load_lib_example', 'fake_wheel_file_b.whl')
+
+
+@pytest.mark.comps
+@allure.story("CLI")
+class TestPackageVersionCLI(unittest.TestCase):
+
+ @classmethod
+ def setUpClass(cls) -> None:
+ pass
+
+ @allure.feature("req2ac")
+ @pytest.mark.serial
+ # cli: idmtools comps SLURM2 req2ac --asset_tag test:123 --pkg astor~=0.7.0
+ def test_create_ac_with_req2ac(self):
+ # run req2ac to get ac_id
+ result = run_command('comps', 'SLURM2', 'req2ac', '--asset_tag', 'test:123', '--pkg', 'astor~=0.7.0',
+ mix_stderr=False)
+ self.assertTrue(result.exit_code == 0, msg=result.output)
+ print(result.stdout)
+ ac_id = result.stdout.strip()
+
+ # run ac-exist to verify ac
+ ac = AssetCollection.from_id(ac_id, as_copy=True)
+ assets = [asset for asset in ac.assets if "astor-0.7.1" in asset.relative_path]
+ self.assertTrue(len(assets) > 0)
+
+ @allure.feature("req2ac")
+ @pytest.mark.serial
+ # cli: idmtools comps SLURM2 ac-exist --pkg astor~=0.7.0
+ def test_ac_exist_with_req2ac(self):
+ # run req2ac to get ac_id
+ result1 = run_command('comps', 'SLURM2', 'req2ac', '--pkg', 'astor~=0.7.0', mix_stderr=False)
+ self.assertTrue(result1.exit_code == 0, msg=result1.output)
+ print(result1.stdout)
+ ac_id = result1.stdout.strip()
+
+ # run ac-exist to verify ac
+ result2 = run_command('comps', 'SLURM2', 'ac-exist', '--pkg', 'astor~=0.7.0', mix_stderr=False)
+ self.assertTrue(result2.exit_code == 0, msg=result2.output)
+ print(result2.stdout)
+ self.assertIn(ac_id, result2.output)
+ ac = AssetCollection.from_id(ac_id, as_copy=True)
+ assets = [asset for asset in ac.assets if "astor-0.7.1" in asset.relative_path]
+ self.assertTrue(len(assets) > 0)
+
+ @allure.feature("req2ac")
+ # cli: idmtools comps SLURM2 ac-exist --pkg pytest==3.0.0
+ def test_ac_not_exist_with_req2ac(self):
+ result = run_command('comps', 'SLURM2', 'ac-exist', '--pkg', 'pytest==3.0.0', mix_stderr=False)
+ self.assertTrue(result.exit_code == 0, msg=result.output)
+ self.assertIn("AC doesn't exist", result.output)
+
+ @allure.feature("req2ac")
+    # cli: idmtools package latest-version --name astor
+ def test_req2ac_latest_version(self):
+ test_versions = ['10.0.0', '0.8.1', '0.8.0', '0.7.1', '0.7.0', '0.6.2', '0.6.1', '0.6', '0.5', '0.4.1', '0.4',
+ '0.3', '0.2.1', '0.2', '0.1']
+ with mock.patch('idmtools_platform_comps.utils.package_version.fetch_versions_from_server',
+ return_value=test_versions) as mock_fetch:
+ result = run_command('package', 'latest-version', '--name', 'astor', mix_stderr=False)
+ self.assertTrue(result.exit_code == 0, msg=result.output)
+ self.assertTrue("10.0.0", result.output)
+
+ @allure.feature("req2ac")
+ # cli: idmtools package list-versions --name astor
+ def test_req2ac_list_versions(self):
+ import re
+ test_versions = ['10.0.0', '0.8.1', '0.8.0', '0.7.1', '0.7.0', '0.6.2', '0.6.1', '0.6', '0.5', '0.4.1', '0.4',
+ '0.3', '0.2.1', '0.2', '0.1']
+ with mock.patch('idmtools_platform_comps.utils.package_version.fetch_versions_from_server',
+ return_value=test_versions) as mock_fetch:
+ result = run_command('package', 'list-versions', '--name', 'astor', mix_stderr=False)
+ self.assertTrue(result.exit_code == 0, msg=result.output)
+ output_str = result.output
+ actual_versions = re.sub('["[\]\'\n ]', '', output_str).split(',')
+ self.assertListEqual(actual_versions, test_versions)
+
+ @allure.feature("req2ac")
+    # cli: idmtools package compatible-version --name astor --base_version 0.7.0
+ def test_req2ac_compatible_version(self):
+ result = run_command('package', 'compatible-version', '--name', 'astor', '--base_version', '0.7.0',
+ mix_stderr=False)
+ self.assertTrue(result.exit_code == 0, msg=result.output)
+ self.assertTrue("0.7.1", result.output)
+
+ @allure.feature("req2ac")
+ # cli: idmtools package checksum --pkg astor==0.8.1
+ def test_req2ac_checksum(self):
+ result = run_command('package', 'checksum', '--pkg', 'astor==0.8.1', mix_stderr=False)
+ self.assertTrue(result.exit_code == 0, msg=result.output)
+ self.assertTrue("3a620d2dc5e26856a9d4442f33785a0a", result.output)
+
+ @allure.feature("req2ac")
+ # cli: idmtools package updated-requirements --pkg astor~=0.7.0
+ def test_req2ac_updated_requirements(self):
+ result = run_command('package', 'updated-requirements', '--pkg', 'astor~=0.7.0', mix_stderr=False)
+ self.assertTrue(result.exit_code == 0, msg=result.output)
+ self.assertTrue("astor==0.7.1", result.stdout_bytes.decode('utf-8'))
+
+ @allure.feature("req2ac")
+ # cli: idmtools package checksum --pkg astor==0.8.1 --pkg idmtools==1.6.2
+ def test_req2ac_checksum_pkg_order(self):
+ result1 = run_command('package', 'checksum', '--pkg', 'astor==0.8.1', '--pkg', 'idmtools==1.6.2',
+ mix_stderr=False)
+ self.assertTrue(result1.exit_code == 0, msg=result1.output)
+ result2 = run_command('package', 'checksum', '--pkg', 'idmtools==1.6.2', '--pkg', 'astor==0.8.1',
+ mix_stderr=False)
+ self.assertTrue(result2.exit_code == 0, msg=result2.output)
+ self.assertEqual(result1.output, result2.output)
+ self.assertTrue("8c815fe17f8d7dfeb790b5d0041b288a", result1.output)
+
+ @allure.feature("req2ac")
+ # cli: idmtools package checksum --wheel wheel_file_1 --wheel wheel_file_2
+ def test_req2ac_checksum_wheels_order(self):
+ result1 = run_command('package', 'checksum', '--wheel', wheel_file_1, '--wheel', wheel_file_2,
+ mix_stderr=False)
+ self.assertTrue(result1.exit_code == 0, msg=result1.output)
+ result2 = run_command('package', 'checksum', '--wheel', wheel_file_2, '--wheel', wheel_file_1,
+ mix_stderr=False)
+ self.assertTrue(result2.exit_code == 0, msg=result2.output)
+ self.assertEqual(result1.output, result2.output)
+ self.assertTrue("d2e204cfbf40e78d6b18fe862f422512", result1.output)
+
+ @allure.feature("req2ac")
+ # cli: idmtools package updated-requirements --wheel wheel_file_1 --wheel wheel_file_2
+ def test_req2ac_updated_requirements_wheels_order(self):
+ result1 = run_command('package', 'updated-requirements', '--wheel', wheel_file_1, '--wheel', wheel_file_2,
+ mix_stderr=False)
+ self.assertTrue(result1.exit_code == 0, msg=result1.output)
+ result2 = run_command('package', 'updated-requirements', '--wheel', wheel_file_2, '--wheel', wheel_file_1,
+ mix_stderr=False)
+ self.assertTrue(result2.exit_code == 0, msg=result2.output)
+ self.assertEqual(result1.output, result2.output)
+ self.assertTrue("d2e204cfbf40e78d6b18fe862f422512", result1.output)
+ self.assertTrue('Assets/fake_wheel_file_a.whl\nAssets/fake_wheel_file_b.whl\n',
+ result1.stdout_bytes.decode('utf-8'))
+
+ @allure.feature("req2ac")
+ # cli: idmtools package updated-requirements --pkg astor~=0.7.0 --pkg idmtools==1.6.2
+    def test_req2ac_updated_requirements_pkg_order(self):
+ result1 = run_command('package', 'updated-requirements', '--pkg', 'astor~=0.7.0', '--pkg', 'idmtools==1.6.2',
+ mix_stderr=False)
+ self.assertTrue(result1.exit_code == 0, msg=result1.output)
+ result2 = run_command('package', 'updated-requirements', '--pkg', 'idmtools==1.6.2', '--pkg', 'astor~=0.7.0',
+ mix_stderr=False)
+ self.assertTrue(result2.exit_code == 0, msg=result2.output)
+ self.assertEqual(result1.output, result2.output)
+ self.assertTrue("astor==0.7.1\nidmtools==1.6.2\n", result1.output)
+ self.assertTrue("astor==0.7.1\nidmtools==1.6.2\n", result1.stdout_bytes.decode('utf-8'))
+
+ @allure.feature("req2ac")
+    # cli: idmtools package checksum --pkg astor==0.8.1 --pkg idmtools==1.6.2 --wheel wheel_file_1 --wheel wheel_file_2
+ def test_req2ac_checksum_pkg_wheels_order(self):
+ result1 = run_command('package', 'checksum', '--pkg', 'astor==0.8.1', '--pkg', 'idmtools==1.6.2',
+ '--wheel', wheel_file_1, '--wheel', wheel_file_2, mix_stderr=False)
+ self.assertTrue(result1.exit_code == 0, msg=result1.output)
+ result2 = run_command('package', 'checksum', '--pkg', 'idmtools==1.6.2', '--pkg', 'astor==0.8.1',
+ '--wheel', wheel_file_2, '--wheel', wheel_file_1, mix_stderr=False)
+ self.assertTrue(result2.exit_code == 0, msg=result2.output)
+ self.assertEqual(result1.output, result2.output)
+ self.assertTrue("8d50bf1f9fc60b806d9816bf5809748d", result1.output)
+
+ @allure.feature("req2ac")
+ # cli: idmtools package updated-requirements --pkg astor==0.8.1 --pkg idmtools==1.6.2 --wheel wheel_file_1 --wheel wheel_file_2
+ def test_req2ac_updated_requirements_pkg_wheels_order(self):
+ result1 = run_command('package', 'updated-requirements', '--pkg', 'astor==0.8.1', '--pkg', 'idmtools==1.6.2',
+ '--wheel', wheel_file_1, '--wheel', wheel_file_2, mix_stderr=False)
+ self.assertTrue(result1.exit_code == 0, msg=result1.output)
+ result2 = run_command('package', 'updated-requirements', '--pkg', 'idmtools==1.6.2', '--pkg', 'astor==0.8.1',
+ '--wheel', wheel_file_2, '--wheel', wheel_file_1, mix_stderr=False)
+ self.assertTrue(result2.exit_code == 0, msg=result2.output)
+ self.assertEqual(result1.output, result2.output)
+ self.assertTrue("8d50bf1f9fc60b806d9816bf5809748d", result1.output)
+ self.assertTrue('astor==0.8.1\nidmtools==1.6.2\nAssets/fake_wheel_file_a.whl\nAssets/fake_wheel_file_b.whl\n',
+ result1.stdout_bytes.decode('utf-8'))
+
+ @pytest.mark.serial
+ def test_get_pkg_match_version(self):
+ test_versions = ['10.0.0', '0.8.1', '0.8.0', '0.7.1', '0.7.0', '0.6.2', '0.6.1', '0.6', '0.5', '0.4.1', '0.4',
+ '0.3', '0.2.1', '0.2', '0.1']
+ with mock.patch('idmtools_platform_comps.utils.package_version.fetch_versions_from_server',
+ return_value=test_versions) as mock_fetch:
+ self.assertEqual(get_pkg_match_version(pkg_name='astor', base_version='0.7.1', test='<'), '0.7.0')
+ self.assertEqual(get_pkg_match_version(pkg_name='astor', base_version='0.7.1', test='<='), '0.7.1')
+ self.assertEqual(get_pkg_match_version(pkg_name='astor', base_version='0.8.0', test='~='), '0.8.1')
+ self.assertEqual(get_pkg_match_version(pkg_name='astor', base_version='0.8.0', test='>='), '10.0.0')
+ self.assertEqual(get_pkg_match_version(pkg_name='astor', base_version='0.8.0', test='>'), '10.0.0')
+ self.assertEqual(get_pkg_match_version(pkg_name='astor', base_version='0.7.1', test='!='), '10.0.0')
+ self.assertEqual(get_pkg_match_version(pkg_name='astor', base_version='0.6', test='=='), '0.6')
+
+ @pytest.mark.serial
+ def test_get_latest_version(self):
+ test_versions = ['10.0.0', '0.8.1', '0.8.0', '0.7.1', '0.7.0', '0.6.2', '0.6.1', '0.6', '0.5', '0.4.1', '0.4',
+ '0.3', '0.2.1', '0.2', '0.1']
+ with mock.patch('idmtools_platform_comps.utils.package_version.fetch_versions_from_server',
+ return_value=test_versions) as mock_fetch:
+ self.assertEqual(get_latest_version(pkg_name='astor'), '10.0.0')
+
+ @pytest.mark.serial
+ def test_fetch_package_versions_with_sort(self):
+ test_versions = ['0.7.1', '0.8.1', '0.8.0r', '0.7.0', '0.6.2', '0.6.1', '0.6', '0.5', '0.4.1', '0.4', '0.3',
+ '0.2.1', '0.1', '0.2']
+ with mock.patch('idmtools_platform_comps.utils.package_version.fetch_versions_from_server',
+ return_value=test_versions) as mock_fetch:
+ expected_sorted_versions = ['0.8.1', '0.8.0r', '0.7.1', '0.7.0', '0.6.2', '0.6.1', '0.6', '0.5', '0.4.1',
+ '0.4', '0.3', '0.2.1', '0.2', '0.1']
+ self.assertEqual(fetch_package_versions(pkg_name='astor', sort=True), expected_sorted_versions)
diff --git a/idmtools_platform_comps/tests/test_vistools/test_vistools_work_item.py b/idmtools_platform_comps/tests/test_vistools/test_vistools_work_item.py
index 861ab1768..fa095da07 100644
--- a/idmtools_platform_comps/tests/test_vistools/test_vistools_work_item.py
+++ b/idmtools_platform_comps/tests/test_vistools/test_vistools_work_item.py
@@ -410,8 +410,8 @@ def test_vistools_output_in_simulation(self):
self.assertTrue(str(vtassetmap_dic['/' + visset_name + '/campaign.json']).endswith('/campaign.json'))
self.assertTrue(str(vtassetmap_dic['/' + visset_name + '/config.json']).endswith('/config.json'))
self.assertTrue(str(vtassetmap_dic['/' + visset_name + '/status.txt']).endswith('/status.txt'))
- self.assertTrue(str(vtassetmap_dic['/' + visset_name + '/StdErr.txt']).endswith('/StdErr.txt'))
- self.assertTrue(str(vtassetmap_dic['/' + visset_name + '/StdOut.txt']).endswith('/StdOut.txt'))
+ self.assertTrue(str(vtassetmap_dic['/' + visset_name + '/stderr.txt']).endswith('/stderr.txt'))
+ self.assertTrue(str(vtassetmap_dic['/' + visset_name + '/stdout.txt']).endswith('/stdout.txt'))
self.assertTrue(str(vtassetmap_dic['/' + visset_name + '/' + p + '/visset.json']).endswith('/visset.json'))
self.assertTrue(str(vtassetmap_dic['/' + visset_name + '/' + p + '/VtWorkerPrepStdErr.txt']).endswith(
'/VtWorkerPrepStdErr.txt'))
@@ -461,10 +461,10 @@ def test_vistools_output_in_simulation(self):
response = requests.get(vtassetmap_dic['/' + visset_name + '/status.txt'])
self.assertTrue(response.status_code < 400)
- response = requests.get(vtassetmap_dic['/' + visset_name + '/StdErr.txt'])
+ response = requests.get(vtassetmap_dic['/' + visset_name + '/stderr.txt'])
self.assertTrue(response.status_code < 400)
- response = requests.get(vtassetmap_dic['/' + visset_name + '/StdOut.txt'])
+ response = requests.get(vtassetmap_dic['/' + visset_name + '/stdout.txt'])
self.assertTrue(response.status_code < 400)
response = requests.get(vtassetmap_dic['/' + visset_name + '/' + p + '/visset.json'])
diff --git a/idmtools_platform_comps/tests/test_workorder.py b/idmtools_platform_comps/tests/test_workorder.py
new file mode 100644
index 000000000..f11b54beb
--- /dev/null
+++ b/idmtools_platform_comps/tests/test_workorder.py
@@ -0,0 +1,432 @@
+import json
+
+import allure
+import os
+from functools import partial
+from typing import Any, Dict
+import pytest
+from idmtools.assets import Asset, AssetCollection
+from idmtools.builders import SimulationBuilder
+from idmtools.core import ItemType
+from idmtools.core.platform_factory import Platform
+from idmtools.entities import CommandLine
+from idmtools.entities.command_task import CommandTask
+from idmtools.entities.experiment import Experiment
+from idmtools.entities.simulation import Simulation
+from idmtools.entities.templated_simulation import TemplatedSimulations
+from idmtools_models.python.json_python_task import JSONConfiguredPythonTask
+from idmtools_platform_comps.ssmt_work_items.work_order import DockerWorkOrder, ExecutionDefinition
+from idmtools_platform_comps.utils.scheduling import add_work_order, add_schedule_config, \
+ default_add_schedule_config_sweep_callback
+from idmtools_models.templated_script_task import TemplatedScriptTask, get_script_wrapper_unix_task, \
+ LINUX_PYTHON_PATH_WRAPPER
+from idmtools_platform_comps.utils.python_requirements_ac.requirements_to_asset_collection import \
+ RequirementsToAssetCollection
+from idmtools_test import COMMON_INPUT_PATH
+from idmtools_test.utils.common_experiments import wait_on_experiment_and_check_all_sim_status
+from idmtools_test.utils.itest_with_persistence import ITestWithPersistence
+from idmtools_platform_comps.utils.scheduling import default_add_workerorder_sweep_callback
+
+
+@pytest.mark.comps
+@pytest.mark.python
+@allure.story("COMPS")
+@allure.story("Python")
+@allure.suite("idmtools_platform_comps")
+class TestWorkOrder(ITestWithPersistence):
+ def setUp(self) -> None:
+ self.case_name = os.path.basename(__file__) + "--" + self._testMethodName
+ print(self.case_name)
+ self.platform = Platform('SLURM2')
+
+ def test_workorder_pythontask(self):
+ """
+ To test a python task with WorkOrder.json. COMPS will use the Executable command from WorkOrder.json instead of
+ the python task's command.
+ Returns:
+
+ """
+ # create a task with a script. The script here doesn't matter; it will be overridden by the command in WorkOrder.json
+ task = JSONConfiguredPythonTask(
+ script_path=os.path.join(COMMON_INPUT_PATH, "compsplatform", "working_model.py"),
+ parameters=(dict(c=0)))
+
+ ts = TemplatedSimulations(base_task=task)
+
+ # use WorkOrder.json, which overrides the input command line and arguments
+ add_work_order(ts, file_path=os.path.join(COMMON_INPUT_PATH, "scheduling", "slurm", "WorkOrder.json"))
+
+ builder = SimulationBuilder()
+
+ def param_update(simulation: Simulation, param: str, value: Any) -> Dict[str, Any]:
+ return simulation.task.set_parameter(param, value)
+
+ builder.add_sweep_definition(partial(param_update, param="a"), range(3))
+ ts.add_builder(builder)
+
+ experiment = Experiment.from_template(ts, name=self.case_name)
+ wait_on_experiment_and_check_all_sim_status(self, experiment, self.platform, scheduling=True)
+ self.assertTrue(experiment.succeeded)
+ for sim in experiment.simulations:
+ assets = self.platform._simulations.all_files(sim)
+ for asset in assets:
+ if asset.filename in ["stdout.txt"]:
+ content = asset.content.decode('utf-8').replace("\\\\", "\\")
+ self.assertIn('hello test', content)
+
+ def test_workorder_commandtask(self):
+ """
+ To test a command task with WorkOrder.json. COMPS will use the Executable command from WorkOrder.json instead of
+ the command task's command specified in the test.
+ Returns:
+
+ """
+
+ def set_value(simulation, name, value):
+ fix_value = round(value, 2) if isinstance(value, float) else value
+ # add argument
+ simulation.task.command.add_raw_argument(str(fix_value))
+ # add tag with our value
+ simulation.tags[name] = fix_value
+
+ # create commandline input for the task
+ command = CommandLine("python3 Assets/commandline_model.py")
+ task = CommandTask(command=command)
+ ts = TemplatedSimulations(base_task=task)
+
+ sb = SimulationBuilder()
+ sb.add_sweep_definition(partial(set_value, name="pop_size"), [10000, 20000])
+ sb.add_sweep_definition(partial(set_value, name="pop_infected"), [10, 100])
+ sb.add_sweep_definition(partial(set_value, name="n_days"), [100, 110])
+ sb.add_sweep_definition(partial(set_value, name="rand_seed"), [1234, 4567])
+ sb.add_sweep_definition(partial(default_add_workerorder_sweep_callback, file_name="WorkOrder.json"),
+ os.path.join(COMMON_INPUT_PATH, "scheduling", "slurm", "WorkOrder1.json"))
+
+ ts.add_builder(sb)
+
+ experiment = Experiment.from_template(ts, name=self.case_name)
+ experiment.add_asset(os.path.join(COMMON_INPUT_PATH, "scheduling", "slurm", "commandline_model.py"))
+ wait_on_experiment_and_check_all_sim_status(self, experiment, self.platform, scheduling=True)
+ self.assertTrue(experiment.succeeded)
+
+ # only verify first simulation's stdout.txt
+ files = self.platform.get_files(item=experiment.simulations[0], files=['stdout.txt', 'WorkOrder.json'])
+ stdout_content = files['stdout.txt'].decode('utf-8').replace("\\\\", "\\")
+ stdout_content = stdout_content.replace("\\", "")
+ self.assertIn(
+ "{'pop_size': '10000', 'pop_infected': '10', 'n_days': 100, 'rand_seed': '1234', 'pop_type': 'hybrid'}",
+ stdout_content)
+
+ workorder_content = files['WorkOrder.json'].decode('utf-8').replace("\\\\", "\\")
+ s1 = json.loads(workorder_content)
+ s2 = json.loads("{\"Command\": \"python3 Assets/commandline_model.py 10000 10 100 1234\", \"NodeGroupName\": \"idm_cd\", \"NumCores\": 1, \"NumProcesses\": 1, \"NumNodes\": 1, \"Environment\": {\"key1\": \"value1\", \"key2:\": \"value2\"}}")
+ self.assertDictEqual(s1, s2)
+
+ @pytest.mark.timeout(60)
+ def test_wrapper_script_execute_comps(self):
+ """
+ To test a wrapper task with WorkOrder.json. COMPS will use the Executable command from WorkOrder.json instead of
+ the wrapper command specified in the test.
+ Returns:
+
+ """
+ cmd = "python3.6 --version"
+ task = CommandTask(cmd)
+
+ task.common_assets.add_asset(
+ Asset(relative_path=os.path.join("site-packages", "test-package"), filename="__init__.py",
+ content="a=\'123\'"))
+ wrapper_task: TemplatedScriptTask = get_script_wrapper_unix_task(task,
+ template_content=LINUX_PYTHON_PATH_WRAPPER)
+ wrapper_task.script_binary = "/bin/bash"
+
+ experiment = Experiment.from_task(wrapper_task, name=self.case_name)
+
+ # upload WorkOrder.json to simulation root dir
+ add_work_order(experiment, file_path=os.path.join("inputs", "WorkOrder.json"))
+
+ wait_on_experiment_and_check_all_sim_status(self, experiment, self.platform, scheduling=True)
+ self.assertTrue(experiment.succeeded)
+
+ for sim in experiment.simulations:
+ assets = self.platform._simulations.all_files(sim)
+ for asset in assets:
+ if asset.filename in ["stdout.txt"]:
+ content = asset.content.decode('utf-8').replace("\\\\", "\\")
+ # don't check full version in case comps updates system
+ self.assertIn('Python 3.6', content)
+
+ def test_workorder_hpc(self):
+ """
+ To test a WorkOrder run on an HPC cluster.
+ Returns:
+
+ """
+ task = JSONConfiguredPythonTask(
+ script_path=os.path.join(COMMON_INPUT_PATH, "compsplatform", "working_model.py"),
+ parameters=(dict(c=0)))
+
+ experiment = Experiment.from_task(task, name=self.case_name)
+ add_work_order(experiment, file_path=os.path.join(COMMON_INPUT_PATH, "scheduling", "hpc", "WorkOrder.json"))
+
+ with Platform('COMPS2') as platform:
+ experiment.run(wait_on_done=True, scheduling=True)
+ self.assertTrue(experiment.succeeded)
+
+ for sim in experiment.simulations:
+ assets = platform._simulations.all_files(sim)
+ for asset in assets:
+ if asset.filename in ["stdout.txt"]:
+ content = asset.content.decode('utf-8').replace("\\\\", "\\")
+ self.assertIn('hello test', content)
+
+ def test_workorder_environment(self):
+ """
+ To test a python task with the Environment section of WorkOrder.json. The command in WorkOrder.json is
+ "python3 Assets/model.py". In COMPS, the file layout is:
+ Assets-
+ |_MyExternalLibarary
+ |_function.py
+ |_model.py
+ |_site-packages
+ |_numpy
+ For model.py to call MyExternalLibarary.function, which uses the numpy package, both MyExternalLibarary
+ and numpy must be on PYTHONPATH, so we add
+ "PYTHONPATH": "$PYTHONPATH:$PWD/Assets:$PWD/Assets/site-packages" in WorkOrder.json.
+
+ Returns:
+ """
+ # add numpy package to cluster
+ pl = RequirementsToAssetCollection(self.platform, pkg_list=['numpy==1.19.5'])
+ ac_id = pl.run()
+ # add numpy to common_assets for a task
+ common_assets = AssetCollection.from_id(ac_id, as_copy=True)
+ # create a task which generates config.json and uploads the script and assets
+ task = JSONConfiguredPythonTask(
+ script_path=os.path.join(COMMON_INPUT_PATH, "python", "model.py"), parameters=dict(a=1, b=10),
+ envelope="parameters", common_assets=common_assets)
+
+ # Add another folder to comps Assets
+ task.common_assets.add_directory(assets_directory=os.path.join(COMMON_INPUT_PATH, "python", "Assets"))
+ experiment = Experiment.from_task(task, name=self.case_name)
+ # add the local WorkOrder2.json to COMPS, renamed to WorkOrder.json
+ add_work_order(experiment, file_path=os.path.join(COMMON_INPUT_PATH, "scheduling", "slurm", "WorkOrder2.json"))
+ wait_on_experiment_and_check_all_sim_status(self, experiment, self.platform, scheduling=True)
+
+ # only verify first simulation's stdout.txt
+ files = self.platform.get_files(item=experiment.simulations[0], files=['stdout.txt'])
+ stdout_content = files['stdout.txt'].decode('utf-8').replace("\\\\", "\\")
+ stdout_content = stdout_content.replace("\\", "")
+ self.assertIn("11", stdout_content) # a+b = 1+10 = 11
+
+ def test_schedule_config_pythontask(self):
+ """
+ To test a python task with a dynamically generated WorkOrder.json. COMPS will use the Executable command from
+ the generated WorkOrder.json instead of the python task's command.
+ Returns:
+
+ """
+ # create a task with a script. The script here doesn't matter; it will be overridden by the command in WorkOrder.json
+ task = JSONConfiguredPythonTask(
+ script_path=os.path.join(COMMON_INPUT_PATH, "compsplatform", "working_model.py"),
+ parameters=(dict(c=0)))
+
+ ts = TemplatedSimulations(base_task=task)
+
+ # use a dynamically generated WorkOrder.json, which overrides the input command line and arguments
+ add_schedule_config(ts, command="python -c \"print('hello test')\"", node_group_name='idm_abcd', num_cores=2,
+ NumProcesses=1, NumNodes=1,
+ Environment={"key1": "value1", "key2:": "value2"})
+
+ builder = SimulationBuilder()
+
+ def param_update(simulation: Simulation, param: str, value: Any) -> Dict[str, Any]:
+ return simulation.task.set_parameter(param, value)
+
+ builder.add_sweep_definition(partial(param_update, param="a"), range(3))
+ ts.add_builder(builder)
+
+ experiment = Experiment.from_template(ts, name=self.case_name)
+ wait_on_experiment_and_check_all_sim_status(self, experiment, self.platform, scheduling=True)
+ self.assertTrue(experiment.succeeded)
+ for sim in experiment.simulations:
+ assets = self.platform._simulations.all_files(sim)
+ for asset in assets:
+ if asset.filename in ["stdout.txt"]:
+ content = asset.content.decode('utf-8').replace("\\\\", "\\")
+ self.assertIn('hello test', content)
+
+ def test_schedule_config_commandtask(self):
+ """
+ To test a command task with a dynamically generated WorkOrder.json. COMPS will use the Executable command from
+ the generated WorkOrder.json instead of the command task's command specified in the test.
+ Returns:
+
+ """
+
+ def set_value(simulation, name, value):
+ fix_value = round(value, 2) if isinstance(value, float) else value
+ # add argument
+ simulation.task.command.add_raw_argument(str(fix_value))
+ # add tag with our value
+ simulation.tags[name] = fix_value
+
+ # create commandline input for the task
+ command = CommandLine("python3 Assets/commandline_model.py")
+ task = CommandTask(command=command)
+ ts = TemplatedSimulations(base_task=task)
+
+ sb = SimulationBuilder()
+ sb.add_sweep_definition(partial(set_value, name="pop_size"), [10000, 20000])
+ sb.add_sweep_definition(partial(set_value, name="pop_infected"), [10, 100])
+ sb.add_sweep_definition(partial(set_value, name="n_days"), [100, 110])
+ sb.add_sweep_definition(partial(set_value, name="rand_seed"), [1234, 4567])
+ sb.add_sweep_definition(
+ partial(default_add_schedule_config_sweep_callback,
+ command="python3 Assets/commandline_model.py {pop_size} {pop_infected} {n_days} {rand_seed}",
+ node_group_name='idm_cd', num_cores=1),
+ [dict(NumProcesses=1, NumNodes=1, Environment={"key1": "value1", "key2:": "value2"})])
+
+ ts.add_builder(sb)
+
+ experiment = Experiment.from_template(ts, name=self.case_name)
+ experiment.add_asset(os.path.join(COMMON_INPUT_PATH, "scheduling", "slurm", "commandline_model.py"))
+ wait_on_experiment_and_check_all_sim_status(self, experiment, self.platform, scheduling=True)
+ self.assertTrue(experiment.succeeded)
+
+ # only verify first simulation's stdout.txt
+ files = self.platform.get_files(item=experiment.simulations[0], files=['stdout.txt', 'WorkOrder.json'])
+ stdout_content = files['stdout.txt'].decode('utf-8').replace("\\\\", "\\")
+ stdout_content = stdout_content.replace("\\", "")
+ self.assertIn(
+ "{'pop_size': '10000', 'pop_infected': '10', 'n_days': 100, 'rand_seed': '1234', 'pop_type': 'hybrid'}",
+ stdout_content)
+
+ workorder_content = files['WorkOrder.json'].decode('utf-8').replace("\\\\", "\\")
+ s1 = json.loads(workorder_content)
+ s2 = json.loads("{\"Command\": \"python3 Assets/commandline_model.py 10000 10 100 1234\", \"NodeGroupName\": \"idm_cd\", \"NumCores\": 1, \"NumProcesses\": 1, \"NumNodes\": 1, \"Environment\": {\"key1\": \"value1\", \"key2:\": \"value2\"}}")
+ self.assertDictEqual(s1, s2)
+
+ @pytest.mark.timeout(60)
+ def test_schedule_config_with_wrapper_script_execute_comps(self):
+ """
+ To test a wrapper task with a dynamically generated WorkOrder.json. COMPS will use the Executable command from
+ the generated WorkOrder.json instead of the wrapper command specified in the test.
+ Returns:
+
+ """
+ cmd = "python3.6 --version"
+ task = CommandTask(cmd)
+
+ task.common_assets.add_asset(
+ Asset(relative_path=os.path.join("site-packages", "test-package"), filename="__init__.py",
+ content="a=\'123\'"))
+ wrapper_task: TemplatedScriptTask = get_script_wrapper_unix_task(task,
+ template_content=LINUX_PYTHON_PATH_WRAPPER)
+ wrapper_task.script_binary = "/bin/bash"
+
+ experiment = Experiment.from_task(wrapper_task, name=self.case_name)
+
+ # upload dynamic WorkOrder.json to simulation root dir
+ add_schedule_config(experiment, command="python3.6 --version", node_group_name='idm_abcd', num_cores=1,
+ NumProcesses=1, NumNodes=1, Environment={"key1": "value1", "key2:": "value2"})
+
+ wait_on_experiment_and_check_all_sim_status(self, experiment, self.platform, scheduling=True)
+ self.assertTrue(experiment.succeeded)
+
+ for sim in experiment.simulations:
+ assets = self.platform._simulations.all_files(sim)
+ for asset in assets:
+ if asset.filename in ["stdout.txt"]:
+ content = asset.content.decode('utf-8').replace("\\\\", "\\")
+ # don't check full version in case comps updates system
+ self.assertIn('Python 3.6', content)
+
+ def test_schedule_config_hpc(self):
+ """
+ To test a dynamically generated WorkOrder run on an HPC cluster.
+ Returns:
+
+ """
+ task = JSONConfiguredPythonTask(
+ script_path=os.path.join(COMMON_INPUT_PATH, "compsplatform", "working_model.py"),
+ parameters=(dict(c=0)))
+
+ experiment = Experiment.from_task(task, name=self.case_name)
+ add_schedule_config(experiment, command="python -c \"print('hello test')\"", node_group_name='emod_abcd',
+ num_cores=1, SingleNode=False, Exclusive=False)
+
+ with Platform('COMPS2') as platform:
+ experiment.run(wait_on_done=True, scheduling=True)
+ self.assertTrue(experiment.succeeded)
+
+ for sim in experiment.simulations:
+ assets = platform._simulations.all_files(sim)
+ for asset in assets:
+ if asset.filename in ["stdout.txt"]:
+ content = asset.content.decode('utf-8').replace("\\\\", "\\")
+ self.assertIn('hello test', content)
+
+ def test_schedule_config_environment(self):
+ """
+ To test a python task with the Environment section of WorkOrder.json. The command in WorkOrder.json is
+ "python3 Assets/model.py". In COMPS, the file layout is:
+ Assets-
+ |_MyExternalLibarary
+ |_function.py
+ |_model.py
+ |_site-packages
+ |_numpy
+ For model.py to call MyExternalLibarary.function, which uses the numpy package, both MyExternalLibarary
+ and numpy must be on PYTHONPATH, so we add
+ "PYTHONPATH": "$PYTHONPATH:$PWD/Assets:$PWD/Assets/site-packages" in WorkOrder.json.
+
+ Returns:
+ """
+ # add numpy package to cluster
+ pl = RequirementsToAssetCollection(self.platform, pkg_list=['numpy==1.19.5'])
+ ac_id = pl.run()
+ # add numpy to common_assets for a task
+ common_assets = AssetCollection.from_id(ac_id, as_copy=True)
+ # create a task which generates config.json and uploads the script and assets
+ task = JSONConfiguredPythonTask(
+ script_path=os.path.join(COMMON_INPUT_PATH, "python", "model.py"), parameters=dict(a=1, b=10),
+ envelope="parameters", common_assets=common_assets)
+
+ # Add another folder to comps Assets
+ task.common_assets.add_directory(assets_directory=os.path.join(COMMON_INPUT_PATH, "python", "Assets"))
+ experiment = Experiment.from_task(task, name=self.case_name)
+ # dynamically generate a WorkOrder.json (equivalent to WorkOrder2.json) via add_schedule_config
+ add_schedule_config(experiment, command="python3 Assets/model.py", node_group_name='idm_abcd', num_cores=1,
+ NumProcesses=1, NumNodes=1,
+ Environment={"key1": "value1", "key2:": "value2",
+ "PYTHONPATH": "$PYTHONPATH:$PWD/Assets:$PWD/Assets/site-packages",
+ "PATH": "$PATH:$PWD/Assets:$PWD/Assets/site-packages"})
+
+ wait_on_experiment_and_check_all_sim_status(self, experiment, self.platform, scheduling=True)
+
+ # only verify first simulation's stdout.txt
+ files = self.platform.get_files(item=experiment.simulations[0], files=['stdout.txt'])
+ stdout_content = files['stdout.txt'].decode('utf-8').replace("\\\\", "\\")
+ stdout_content = stdout_content.replace("\\", "")
+ self.assertIn("11", stdout_content) # a+b = 1+10 = 11
+
+ def test_workorder_in_workitem(self):
+ """
+ To test a WorkItem's WorkOrder.json. A user can dynamically pull a docker image directly from IDM's production
+ artifactory instead of the old way, which required deploying the docker image to the docker worker host machine.
+ In this example, we pull the NYU DTK docker image to the docker worker, then execute the Eradication command in a
+ COMPS WorkItem.
+ Returns:
+ """
+ command = "ls -lart" # anything since it will be override with WorkOrder.json file
+ from idmtools_platform_comps.ssmt_work_items.comps_workitems import SSMTWorkItem
+ wi = SSMTWorkItem(name=self.case_name, command=command, tags={'idmtools': self.case_name})
+ # override WorkOrder.json with a user-provided file
+ wi.load_work_order(os.path.join("inputs", "workitems", "ssmt", "WorkOrder.json"))
+ wi.run(wait_on_done=True)
+ out_filenames = ["stdout.txt"]
+ files = self.platform.get_files(item=wi, files=out_filenames)
+ stdout_content = files['stdout.txt'].decode('utf-8')
+ self.assertIn("/dtk/Eradication version: 2.17.4463.0", stdout_content)
+
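
Taken together, the new tests reduce to one core pattern; the sketch below (placeholder experiment name and input path) condenses it, using only calls that appear in the test file above:

```python
# Condensed sketch of the scheduling pattern exercised by test_workorder.py.
# The experiment name and input path are placeholders.
import os
from idmtools.core.platform_factory import Platform
from idmtools.entities.command_task import CommandTask
from idmtools.entities.experiment import Experiment
from idmtools_platform_comps.utils.scheduling import add_work_order, add_schedule_config

task = CommandTask("python3 Assets/model.py")  # overridden by the WorkOrder command
experiment = Experiment.from_task(task, name="workorder-demo")

# Variant 1: ship an existing WorkOrder.json to the simulation root.
add_work_order(experiment, file_path=os.path.join("inputs", "WorkOrder.json"))

# Variant 2: generate WorkOrder.json from keyword arguments instead.
# add_schedule_config(experiment, command="python3 Assets/model.py",
#                     node_group_name="idm_abcd", num_cores=1,
#                     NumProcesses=1, NumNodes=1)

with Platform("SLURM2"):
    experiment.run(wait_until_done=True, scheduling=True)
```
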
diff --git a/idmtools_platform_local/.bumpversion.cfg b/idmtools_platform_local/.bumpversion.cfg
index a62b43129..be0ca5bea 100644
--- a/idmtools_platform_local/.bumpversion.cfg
+++ b/idmtools_platform_local/.bumpversion.cfg
@@ -1,5 +1,5 @@
[bumpversion]
-current_version = 1.6.2
+current_version = 1.6.3
commit = False
tag = False
parse = (?P\d+)\.(?P\d+)\.(?P\d+)((?P[\+a-z]+)\.(?P\d+))?
diff --git a/idmtools_platform_local/VERSION b/idmtools_platform_local/VERSION
index 308b6faa7..f5d2a5858 100644
--- a/idmtools_platform_local/VERSION
+++ b/idmtools_platform_local/VERSION
@@ -1 +1 @@
-1.6.2
\ No newline at end of file
+1.6.3
\ No newline at end of file
diff --git a/idmtools_platform_local/idmtools_platform_local/__init__.py b/idmtools_platform_local/idmtools_platform_local/__init__.py
index 51bbb3f21..31e744e4a 100644
--- a/idmtools_platform_local/idmtools_platform_local/__init__.py
+++ b/idmtools_platform_local/idmtools_platform_local/__init__.py
@@ -1 +1 @@
-__version__ = "1.6.2"
+__version__ = "1.6.3"
diff --git a/idmtools_platform_local/requirements.txt b/idmtools_platform_local/requirements.txt
index d07d9b28c..fd8a3e044 100644
--- a/idmtools_platform_local/requirements.txt
+++ b/idmtools_platform_local/requirements.txt
@@ -1,10 +1,10 @@
backoff~=1.10.0
click~=7.1.2
colorama~=0.4.4
-docker>=4.3.1
-dramatiq[redis, watch]~=1.9.0
-gevent~=20.9.0
-idmtools~=1.6.2
+docker>=4.3.1,<4.5.0
+dramatiq[redis, watch]~=1.10.0
+gevent>=20.12.1,<=21.2.0
+idmtools~=1.6.3
requests>=2.25.0
stringcase~=1.2.0
-tabulate~=0.8.7
\ No newline at end of file
+tabulate>=0.8.9,<0.9
diff --git a/idmtools_platform_local/setup.py b/idmtools_platform_local/setup.py
index c20dafe83..937a46c74 100644
--- a/idmtools_platform_local/setup.py
+++ b/idmtools_platform_local/setup.py
@@ -73,5 +73,5 @@
test_suite='tests',
extras_require=extras,
url='https://github.com/InstituteforDiseaseModeling/idmtools',
- version='1.6.2'
+ version='1.6.3'
)
diff --git a/idmtools_platform_local/tests/test_analyzers_local.py b/idmtools_platform_local/tests/test_analyzers_local.py
index 1b9554b44..44b20ef93 100644
--- a/idmtools_platform_local/tests/test_analyzers_local.py
+++ b/idmtools_platform_local/tests/test_analyzers_local.py
@@ -74,7 +74,7 @@ def setUpClass(cls) -> None:
cls.exp_id = e.uid
- @pytest.mark.timeout(90)
+ @pytest.mark.timeout(180)
@pytest.mark.long
def test_AddAnalyzer(self):
self.case_name = os.path.basename(__file__) + "--" + self._testMethodName
diff --git a/idmtools_platform_local/workers_requirements.txt b/idmtools_platform_local/workers_requirements.txt
index 59a742578..300f3d0e8 100644
--- a/idmtools_platform_local/workers_requirements.txt
+++ b/idmtools_platform_local/workers_requirements.txt
@@ -1,2 +1,2 @@
-sqlalchemy~=1.3.20
+sqlalchemy~=1.4.1
psycopg2-binary~=2.8.6
diff --git a/idmtools_platform_slurm/requirements.txt b/idmtools_platform_slurm/requirements.txt
index f87f2b2b6..1dca67e0b 100644
--- a/idmtools_platform_slurm/requirements.txt
+++ b/idmtools_platform_slurm/requirements.txt
@@ -1,3 +1,3 @@
-idmtools~=1.6.2
+idmtools~=1.6.3
paramiko~=2.7.2
dataclasses-json
\ No newline at end of file
diff --git a/idmtools_test/.bumpversion.cfg b/idmtools_test/.bumpversion.cfg
index 0c3113a2b..78f4d909e 100644
--- a/idmtools_test/.bumpversion.cfg
+++ b/idmtools_test/.bumpversion.cfg
@@ -1,5 +1,5 @@
[bumpversion]
-current_version = 1.6.2
+current_version = 1.6.3
commit = False
tag = False
parse = (?P\d+)\.(?P\d+)\.(?P\d+)((?P[\+a-z]+)\.(?P\d+))?
diff --git a/idmtools_test/idmtools_test/__init__.py b/idmtools_test/idmtools_test/__init__.py
index a0ddc0d47..5a367d465 100644
--- a/idmtools_test/idmtools_test/__init__.py
+++ b/idmtools_test/idmtools_test/__init__.py
@@ -1,5 +1,5 @@
import os
-__version__ = '1.6.2'
+__version__ = '1.6.3'
current_directory = os.path.dirname(os.path.realpath(__file__))
COMMON_INPUT_PATH = os.path.join(current_directory, "inputs")
diff --git a/idmtools_test/idmtools_test/inputs/scheduling/hpc/WorkOrder.json b/idmtools_test/idmtools_test/inputs/scheduling/hpc/WorkOrder.json
new file mode 100644
index 000000000..5c3b4875c
--- /dev/null
+++ b/idmtools_test/idmtools_test/inputs/scheduling/hpc/WorkOrder.json
@@ -0,0 +1,7 @@
+{
+ "Command": "python -c \"print('hello test')\"",
+ "NodeGroupName": "emod_abcd",
+ "NumCores": 1,
+ "SingleNode": false,
+ "Exclusive": false
+}
\ No newline at end of file
diff --git a/idmtools_test/idmtools_test/inputs/scheduling/slurm/WorkOrder.json b/idmtools_test/idmtools_test/inputs/scheduling/slurm/WorkOrder.json
new file mode 100644
index 000000000..dc9758af4
--- /dev/null
+++ b/idmtools_test/idmtools_test/inputs/scheduling/slurm/WorkOrder.json
@@ -0,0 +1,11 @@
+{
+ "Command": "python -c \"print('hello test')\"",
+ "NodeGroupName": "idm_abcd",
+ "NumCores": 2,
+ "NumProcesses": 1,
+ "NumNodes": 1,
+ "Environment": {
+ "key1": "value1",
+ "key2:": "value2"
+ }
+}
\ No newline at end of file
diff --git a/idmtools_test/idmtools_test/inputs/scheduling/slurm/WorkOrder1.json b/idmtools_test/idmtools_test/inputs/scheduling/slurm/WorkOrder1.json
new file mode 100644
index 000000000..6add49c70
--- /dev/null
+++ b/idmtools_test/idmtools_test/inputs/scheduling/slurm/WorkOrder1.json
@@ -0,0 +1,11 @@
+{
+ "Command": "python3 Assets/commandline_model.py {pop_size} {pop_infected} {n_days} {rand_seed}",
+ "NodeGroupName": "idm_cd",
+ "NumCores": 1,
+ "NumProcesses": 1,
+ "NumNodes": 1,
+ "Environment": {
+ "key1": "value1",
+ "key2:": "value2"
+ }
+}
\ No newline at end of file
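
The {pop_size}-style tokens in the Command above are placeholders that the per-simulation sweep callback (default_add_workerorder_sweep_callback in the tests) resolves against the sweep values, which is why test_workorder_commandtask asserts a fully expanded command. Conceptually the substitution is plain string formatting:

```python
# Conceptual sketch of the placeholder fill-in; the real logic lives in
# idmtools_platform_comps.utils.scheduling, this is illustration only.
template = "python3 Assets/commandline_model.py {pop_size} {pop_infected} {n_days} {rand_seed}"
values = {"pop_size": 10000, "pop_infected": 10, "n_days": 100, "rand_seed": 1234}
print(template.format(**values))
# python3 Assets/commandline_model.py 10000 10 100 1234
```
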
diff --git a/idmtools_test/idmtools_test/inputs/scheduling/slurm/WorkOrder2.json b/idmtools_test/idmtools_test/inputs/scheduling/slurm/WorkOrder2.json
new file mode 100644
index 000000000..5bc46a8ab
--- /dev/null
+++ b/idmtools_test/idmtools_test/inputs/scheduling/slurm/WorkOrder2.json
@@ -0,0 +1,13 @@
+{
+ "Command": "python3 Assets/model.py",
+ "NodeGroupName": "idm_abcd",
+ "NumCores": 1,
+ "NumProcesses": 1,
+ "NumNodes": 1,
+ "Environment": {
+ "key1": "value1",
+ "key2:": "value2",
+ "PYTHONPATH": "$PYTHONPATH:$PWD/Assets:$PWD/Assets/site-packages",
+ "PATH": "$PATH:$PWD/Assets:$PWD/Assets/site-packages"
+ }
+}
\ No newline at end of file
diff --git a/idmtools_test/idmtools_test/inputs/scheduling/slurm/commandline_model.py b/idmtools_test/idmtools_test/inputs/scheduling/slurm/commandline_model.py
new file mode 100644
index 000000000..dfe889aa9
--- /dev/null
+++ b/idmtools_test/idmtools_test/inputs/scheduling/slurm/commandline_model.py
@@ -0,0 +1,22 @@
+import sys
+import time
+
+
+def test_sweep(pop_size=10000, pop_infected=10, n_days=120, rand_seed=1, pop_type='hybrid'):
+ pars = {
+ "pop_size": pop_size, # Population size
+ "pop_infected": pop_infected, # Number of initial infections
+ "n_days": n_days, # Number of days to simulate
+ "rand_seed": rand_seed, # Random seed
+ "pop_type": pop_type, # Population to use -- "hybrid" is random with household, school,and work structure
+ }
+ print(str(pars))
+
+
+if __name__ == "__main__":
+ pop_size = sys.argv[1]
+ pop_infected = sys.argv[2]
+ n_days = int(sys.argv[3])
+ rand_seed = sys.argv[4]
+
+ test_sweep(pop_size=pop_size, pop_infected=pop_infected, n_days=n_days, rand_seed=rand_seed, pop_type='hybrid')
\ No newline at end of file
diff --git a/idmtools_test/idmtools_test/test_precreate_hooks.py b/idmtools_test/idmtools_test/test_precreate_hooks.py
index e477d1ecd..3f4b8025b 100644
--- a/idmtools_test/idmtools_test/test_precreate_hooks.py
+++ b/idmtools_test/idmtools_test/test_precreate_hooks.py
@@ -1,9 +1,7 @@
import os
+import contextlib
from idmtools.core import TRUTHY_VALUES, getLogger
from idmtools.registry.hook_specs import function_hook_impl
-from idmtools_platform_comps.utils.assetize_output.assetize_output import AssetizeOutput
-from idmtools_platform_comps.utils.download.download import DownloadWorkItem
-from idmtools_test.utils.comps import load_library_dynamically
TEST_WITH_NEW_CODE = os.environ.get("TEST_WITH_PACKAGES", 'n').lower() in TRUTHY_VALUES
logger = getLogger(__name__)
@@ -17,7 +15,11 @@ def idmtools_platform_pre_create_item(item: 'IEntity', **kwargs):
def load_packages_to_ssmt_image_dynamically(item):
- if os.environ.get("TEST_WITH_PACKAGES", 'n').lower() in TRUTHY_VALUES:
- logger.debug("TEST WITH NEW CODE is enabled. Adding COMPS and IDMTOOLS package to asset")
- if isinstance(item, (AssetizeOutput, DownloadWorkItem)):
- item.add_pre_creation_hook(load_library_dynamically)
+ with contextlib.suppress(ImportError):
+ from idmtools_platform_comps.utils.assetize_output.assetize_output import AssetizeOutput
+ from idmtools_platform_comps.utils.download.download import DownloadWorkItem
+ from idmtools_test.utils.comps import load_library_dynamically
+ if os.environ.get("TEST_WITH_PACKAGES", 'n').lower() in TRUTHY_VALUES:
+ logger.debug("TEST WITH NEW CODE is enabled. Adding COMPS and IDMTOOLS package to asset")
+ if isinstance(item, (AssetizeOutput, DownloadWorkItem)):
+ item.add_pre_creation_hook(load_library_dynamically)
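
The contextlib.suppress(ImportError) guard above makes the hook a no-op when the COMPS packages are absent. The same optional-dependency pattern in isolation (module and function names are hypothetical):

```python
# Generic optional-dependency pattern: skip hook registration entirely
# when the optional plugin is not installed, instead of failing at import.
import contextlib

def load_plugin_hooks(item):
    with contextlib.suppress(ImportError):
        from some_optional_plugin import register  # hypothetical module
        register(item)
```
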
diff --git a/idmtools_test/idmtools_test/utils/common_experiments.py b/idmtools_test/idmtools_test/utils/common_experiments.py
index 102addc75..e1468e442 100644
--- a/idmtools_test/idmtools_test/utils/common_experiments.py
+++ b/idmtools_test/idmtools_test/utils/common_experiments.py
@@ -54,7 +54,8 @@ def get_model1_templated_experiment(case_name, parameters=None):
def wait_on_experiment_and_check_all_sim_status(tc, experiment, platform=None,
- expected_status: EntityStatus = EntityStatus.SUCCEEDED):
+ expected_status: EntityStatus = EntityStatus.SUCCEEDED,
+ scheduling=False):
"""
Run experiment and wait for it to finish then check all sims succeeded
Args:
@@ -65,7 +66,7 @@ def wait_on_experiment_and_check_all_sim_status(tc, experiment, platform=None,
Returns:
"""
- experiment.run(wait_until_done=True)
+ experiment.run(wait_until_done=True, scheduling=scheduling)
if isinstance(tc, type):
tc.assertTrue(tc, all([s.status == expected_status for s in experiment.simulations]))
else:
diff --git a/idmtools_test/requirements.txt b/idmtools_test/requirements.txt
index dac0e9d01..2bc4f0cac 100644
--- a/idmtools_test/requirements.txt
+++ b/idmtools_test/requirements.txt
@@ -1,13 +1,13 @@
-idmtools~=1.6.2
-pytest~=6.1.2
-pytest-runner~=5.2
+idmtools~=1.6.3
+pytest
+pytest-runner~=5.3
xmlrunner~=1.7.7
-sqlalchemy~=1.3.20
+sqlalchemy~=1.4.1
psycopg2-binary~=2.8.6
flask~=1.1
Flask-AutoIndex~=0.6.6
flask_restful~=0.3.8
Flask-SQLAlchemy~=2.4.4
-matplotlib~=3.3.3
+matplotlib~=3.3.4
Werkzeug==1.0.1
numpy!=1.19.4
\ No newline at end of file
diff --git a/idmtools_test/setup.py b/idmtools_test/setup.py
index 7c216efca..99c9cce64 100644
--- a/idmtools_test/setup.py
+++ b/idmtools_test/setup.py
@@ -44,6 +44,6 @@
),
test_suite='tests',
url='https://github.com/InstituteforDiseaseModeling/idmtools',
- version='1.6.2',
+ version='1.6.3',
zip_safe=False
)