diff --git a/.github/workflows/changelog.yaml b/.github/workflows/changelog.yaml index 5656c87b861c..241658fb71dc 100644 --- a/.github/workflows/changelog.yaml +++ b/.github/workflows/changelog.yaml @@ -5,6 +5,7 @@ on: tags: - v* - "!v0.0.0" + permissions: contents: read @@ -17,7 +18,7 @@ jobs: pull-requests: write # for peter-evans/create-pull-request to create a PR runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: main fetch-depth: 0 @@ -25,7 +26,7 @@ jobs: - run: git tag -l 'v*' # avoid invoking `make` to reduce the risk of a Makefile bug failing this workflow - run: ./hack/changelog.sh > CHANGELOG.md - - uses: peter-evans/create-pull-request@v5 + - uses: peter-evans/create-pull-request@153407881ec5c347639a548ade7d8ad1d6740e38 # v5.0.2 with: title: 'docs: updated CHANGELOG.md' commit-message: 'docs: updated CHANGELOG.md' diff --git a/.github/workflows/ci-build.yaml b/.github/workflows/ci-build.yaml index 215eec0ffcce..69dd984caea8 100644 --- a/.github/workflows/ci-build.yaml +++ b/.github/workflows/ci-build.yaml @@ -29,12 +29,12 @@ jobs: ui: ${{ steps.changed-files.outputs.ui_any_modified == 'true' }} runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: fetch-depth: 50 # assume PRs are less than 50 commits - name: Get relevant files changed per group id: changed-files - uses: tj-actions/changed-files@v40 + uses: tj-actions/changed-files@cbda684547adc8c052d50711417fa61b428a9f88 # v41.1.2 with: files_yaml: | common: &common @@ -57,6 +57,7 @@ jobs: e2e-tests: - *tests # plus manifests and SDKs that are used in E2E tests + - Dockerfile - manifests/** - sdks/** codegen: @@ -73,6 +74,7 @@ jobs: - pkg/** - cmd/** - examples/** # examples are used within the fields lists + - manifests/** # a few of these are generated and committed # generation scripts - hack/cli/** - 
hack/jsonschema/** @@ -88,6 +90,8 @@ jobs: - *tests # plus lint config - .golangci.yml + # all GH workflows / actions + - .github/workflows/** # docs files below - docs/** # generated files are covered by codegen @@ -111,8 +115,8 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 10 steps: - - uses: actions/checkout@v4 - - uses: actions/setup-go@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 with: go-version: "1.21" cache: true @@ -122,33 +126,40 @@ jobs: if: github.ref == 'refs/heads/main' run: bash <(curl -s https://codecov.io/bash) - argoexec-image: - name: argoexec-image + argo-images: + name: argo-images # needs: [ lint ] runs-on: ubuntu-latest timeout-minutes: 10 + strategy: + fail-fast: false + matrix: + include: + - image: argoexec + - image: argocli steps: - - uses: actions/checkout@v4 - - uses: docker/setup-buildx-action@v3 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0 - name: Build and export - uses: docker/build-push-action@v5 + uses: docker/build-push-action@4a13e500e55cf31b7a5d59a38ab2040ab0f42f56 # v5.1.0 with: context: . 
- tags: quay.io/argoproj/argoexec:latest - outputs: type=docker,dest=/tmp/argoexec_image.tar - target: argoexec + tags: quay.io/argoproj/${{matrix.image}}:latest + outputs: type=docker,dest=/tmp/${{matrix.image}}_image.tar + target: ${{matrix.image}} cache-from: type=gha cache-to: type=gha,mode=max - name: Upload - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3 with: - name: argoexec - path: /tmp/argoexec_image.tar + name: ${{matrix.image}}_image.tar + path: /tmp/${{matrix.image}}_image.tar if-no-files-found: error e2e-tests: name: E2E Tests - needs: [ argoexec-image ] + needs: [ changed-files, argo-images ] + if: ${{ needs.changed-files.outputs.e2e-tests == 'true' }} runs-on: ubuntu-latest timeout-minutes: 30 env: @@ -190,21 +201,21 @@ jobs: steps: - name: Install socat (needed by Kubernetes v1.25) run: sudo apt-get -y install socat - - uses: actions/checkout@v4 - - uses: actions/setup-go@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 with: go-version: "1.21" cache: true - name: Install Java for the SDK if: ${{matrix.test == 'test-java-sdk'}} - uses: actions/setup-java@v4 + uses: actions/setup-java@387ac29b308b003ca37ba93a6cab5eb57c8f5f93 # v4.0.0 with: java-version: '8' distribution: adopt cache: maven - name: Install Python for the SDK if: ${{matrix.test == 'test-python-sdk'}} - uses: actions/setup-python@v5 + uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 with: python-version: '3.x' cache: pip @@ -222,13 +233,16 @@ jobs: echo " user:" >> $KUBECONFIG echo " token: xxxxxx" >> $KUBECONFIG until kubectl cluster-info ; do sleep 10s ; done - - name: Download argoexec image - uses: actions/download-artifact@v4 + - name: Download images + uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 with: - name: argoexec + pattern: 
'*_image.tar' path: /tmp - - name: Load argoexec image - run: docker load < /tmp/argoexec_image.tar + - name: Load images + run: | + set -eux + docker load < /tmp/argoexec_image.tar/argoexec_image.tar + docker load < /tmp/argocli_image.tar/argocli_image.tar - name: Set-up /etc/hosts run: | echo '127.0.0.1 dex' | sudo tee -a /etc/hosts @@ -295,8 +309,8 @@ jobs: env: GOPATH: /home/runner/go steps: - - uses: actions/checkout@v4 - - uses: actions/setup-go@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 with: go-version: "1.21" cache: true @@ -331,8 +345,8 @@ jobs: env: GOPATH: /home/runner/go steps: - - uses: actions/checkout@v4 - - uses: actions/setup-go@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 with: go-version: "1.21" cache: true @@ -340,6 +354,9 @@ jobs: # if lint makes changes that are not in the PR, fail the build - name: Check if lint made changes not present in the PR run: git diff --exit-code + # lint GH Actions + - name: Ensure GH Actions are pinned to SHAs + uses: zgosalvez/github-actions-ensure-sha-pinned-actions@ba37328d4ea95eaf8b3bd6c6cef308f709a5f2ec # v3.0.3 ui: name: UI @@ -348,8 +365,8 @@ jobs: env: NODE_OPTIONS: --max-old-space-size=4096 steps: - - uses: actions/checkout@v4 - - uses: actions/setup-node@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/setup-node@b39b52d1213e96004bfcb1c61a8a6fa8ab84f3e8 # v4.0.1 with: node-version: "20" # change in all GH Workflows cache: yarn diff --git a/.github/workflows/default-branch-check.yaml b/.github/workflows/default-branch-check.yaml index 5e4ac0a54aa2..dd8b0268b225 100644 --- a/.github/workflows/default-branch-check.yaml +++ b/.github/workflows/default-branch-check.yaml @@ -12,7 +12,7 @@ jobs: steps: - name: fail if base branch is not 
default branch if: ${{ github.event.pull_request.base.ref != github.event.repository.default_branch }} - uses: actions/github-script@v3 + uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 with: script: | core.setFailed("Base branch of the PR - ${{ github.event.pull_request.base.ref }} is not a default branch. Please reopen your PR to ${{ github.event.repository.default_branch }}") diff --git a/.github/workflows/dependabot-reviewer.yml b/.github/workflows/dependabot-reviewer.yml index 0dfb799f0e6a..9b23ac71b790 100644 --- a/.github/workflows/dependabot-reviewer.yml +++ b/.github/workflows/dependabot-reviewer.yml @@ -9,13 +9,13 @@ jobs: review: if: ${{ github.actor == 'dependabot[bot]' && github.repository == 'argoproj/argo-workflows'}} permissions: - pull-requests: write - contents: write + pull-requests: write # for approving a PR + contents: write # for enabling auto-merge on a PR runs-on: ubuntu-latest steps: - name: Dependabot metadata id: metadata - uses: dependabot/fetch-metadata@v1.6.0 + uses: dependabot/fetch-metadata@c9c4182bf1b97f5224aee3906fd373f6b61b4526 # v1.6.0 with: github-token: "${{ secrets.GITHUB_TOKEN }}" - name: Approve PR diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml index 5cdac87abf25..78a47d34172b 100644 --- a/.github/workflows/docs.yaml +++ b/.github/workflows/docs.yaml @@ -13,38 +13,40 @@ concurrency: cancel-in-progress: true permissions: - contents: write + contents: read jobs: docs: runs-on: ubuntu-latest + permissions: + contents: write # for publishing the docs to GH Pages steps: - - uses: actions/checkout@v4 - - uses: actions/setup-python@v5 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 with: python-version: 3.9 - - uses: actions/setup-go@v4 + - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 with: go-version: '1.21' - - uses: actions/setup-node@v4 + 
- uses: actions/setup-node@b39b52d1213e96004bfcb1c61a8a6fa8ab84f3e8 # v4.0.1 with: node-version: "19" # Use the same make target both locally and on CI to make it easier to debug failures. - name: Build & Lint docs run: make docs - # If markdownlint fixes issues, files will be changed. If so, fail the build. - - name: Check if markdownlint --fix made changes + # If linters auto-fix issues, files will be changed. If so, fail the build. + - name: Check if linters made changes run: git diff --exit-code # Upload the site so reviewers see it. - name: Upload Docs Site - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3 with: name: docs path: site if-no-files-found: error - name: Publish to GH Pages (when on main) if: github.repository == 'argoproj/argo-workflows' && github.ref == 'refs/heads/main' - uses: peaceiris/actions-gh-pages@v3 + uses: peaceiris/actions-gh-pages@373f7f263a76c20808c831209c920827a82a2847 # v3.9.3 with: github_token: ${{ secrets.GITHUB_TOKEN }} publish_branch: gh-pages diff --git a/.github/workflows/pr.yaml b/.github/workflows/pr.yaml index 806f0185412e..94041ebca51d 100644 --- a/.github/workflows/pr.yaml +++ b/.github/workflows/pr.yaml @@ -8,11 +8,14 @@ on: - reopened - synchronize +permissions: + contents: read + jobs: title-check: runs-on: ubuntu-latest steps: - name: Check PR Title's semantic conformance - uses: amannn/action-semantic-pull-request@v5 + uses: amannn/action-semantic-pull-request@e9fabac35e210fea40ca5b14c0da95a099eff26f # v5.4.0 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 16c3fa54521e..b99ae416a008 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -22,7 +22,7 @@ permissions: jobs: build-linux-amd64: - name: Build & push linux/amd64 + name: Build & push linux if: github.repository == 'codefresh-io/argo-workflows' runs-on: ubuntu-latest strategy: @@ 
-30,74 +30,18 @@ jobs: platform: [ linux/amd64 ] target: [ workflow-controller, argocli, argoexec ] steps: - - uses: actions/checkout@v3 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 - with: - version: v0.9.1 - - - name: Cache Docker layers - uses: actions/cache@v3 - id: cache - with: - path: /tmp/.buildx-cache - key: ${{ runner.os }}-${{ matrix.platform }}-${{ matrix.target }}-buildx-${{ github.sha }} - restore-keys: | - ${{ runner.os }}-${{ matrix.platform }}-${{ matrix.target }}-buildx- - - - name: Docker Login - uses: docker/login-action@v2 - with: - registry: quay.io - username: ${{ secrets.QUAYIO_USERNAME }} - password: ${{ secrets.QUAYIO_PASSWORD }} - - - name: Docker Buildx - env: - DOCKERIO_ORG: ${{ secrets.DOCKERIO_ORG }} - PLATFORM: ${{ matrix.platform }} - TARGET: ${{ matrix.target }} - run: | - tag=$(basename $GITHUB_REF) - if [ $tag = "master" ]; then - tag="latest" - fi - - tag_suffix=$(echo $PLATFORM | sed -r "s/\//-/g") - image_name="${DOCKERIO_ORG}/${TARGET}:${tag}-${tag_suffix}" - - docker buildx build \ - --cache-from "type=local,src=/tmp/.buildx-cache" \ - --cache-to "type=local,dest=/tmp/.buildx-cache" \ - --output "type=image,push=true" \ - --platform="${PLATFORM}" \ - --target $TARGET \ - --tag quay.io/$image_name . 
- - build-linux-arm64: - name: Build & push linux/arm64 - if: github.repository == 'codefresh-io/argo-workflows' - runs-on: ubuntu-latest - strategy: - matrix: - platform: [ linux/arm64 ] - target: [ workflow-controller, argocli, argoexec ] - steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Set up QEMU - uses: docker/setup-qemu-action@v2 - with: - platforms: arm64 + uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 # v3.0.0 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0 with: - version: v0.9.1 + version: v0.10.4 - name: Cache Docker layers - uses: actions/cache@v3 + uses: actions/cache@e12d46a63a90f2fae62d114769bbf2a179198b5c # v3.3.3 id: cache with: path: /tmp/.buildx-cache @@ -105,8 +49,8 @@ jobs: restore-keys: | ${{ runner.os }}-${{ matrix.platform }}-${{ matrix.target }}-buildx- - - name: Docker Login - uses: docker/login-action@v2 + - name: Login to Quay + uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0 with: registry: quay.io username: ${{ secrets.QUAYIO_USERNAME }} @@ -118,11 +62,15 @@ jobs: PLATFORM: ${{ matrix.platform }} TARGET: ${{ matrix.target }} run: | + set -eux tag=$(basename $GITHUB_REF) - if [ $tag = "master" ]; then + if [ $tag = "main" ]; then tag="latest" fi - + # copied verbatim from Makefile + GIT_COMMIT=$(git rev-parse HEAD || echo unknown) + GIT_TAG=$(git describe --exact-match --tags --abbrev=0 2> /dev/null || echo untagged) + GIT_TREE_STATE=$(if [ -z "`git status --porcelain`" ]; then echo "clean" ; else echo "dirty"; fi) tag_suffix=$(echo $PLATFORM | sed -r "s/\//-/g") image_name="${DOCKERIO_ORG}/${TARGET}:${tag}-${tag_suffix}" @@ -130,8 +78,13 @@ jobs: --cache-from "type=local,src=/tmp/.buildx-cache" \ --cache-to "type=local,dest=/tmp/.buildx-cache" \ --output "type=image,push=true" \ + --build-arg 
GIT_COMMIT=$GIT_COMMIT \ + --build-arg GIT_TAG=$GIT_TAG \ + --build-arg GIT_TREE_STATE=$GIT_TREE_STATE \ --platform="${PLATFORM}" \ --target $TARGET \ + --provenance=false \ + --tag $image_name \ --tag quay.io/$image_name . build-windows: @@ -139,10 +92,10 @@ jobs: if: github.repository == 'codefresh-io/argo-workflows' runs-on: windows-2022 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Login to Quay - uses: docker/login-action@v2 + uses: Azure/docker-login@83efeb77770c98b620c73055fbb59b2847e17dc0 # v1.0.1 with: registry: quay.io username: ${{ secrets.QUAYIO_USERNAME }} @@ -168,6 +121,7 @@ jobs: docker tag $image_name quay.io/$image_name docker push quay.io/$image_name + done push-images: @@ -176,10 +130,10 @@ jobs: runs-on: ubuntu-latest needs: [ build-linux-amd64, build-linux-arm64, build-windows ] steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Login to Quay - uses: docker/login-action@v2 + uses: Azure/docker-login@83efeb77770c98b620c73055fbb59b2847e17dc0 # v1.0.1 with: registry: quay.io username: ${{ secrets.QUAYIO_USERNAME }} @@ -228,7 +182,7 @@ jobs: target: [ workflow-controller, argocli, argoexec ] steps: - name: Login to Quay - uses: Azure/docker-login@v1 + uses: Azure/docker-login@83efeb77770c98b620c73055fbb59b2847e17dc0 # v1.0.1 with: login-server: quay.io username: ${{ secrets.QUAYIO_USERNAME }} @@ -255,7 +209,7 @@ jobs: needs: [ push-images ] steps: - name: Login to Quay - uses: Azure/docker-login@v1 + uses: Azure/docker-login@83efeb77770c98b620c73055fbb59b2847e17dc0 # v1.0.1 with: login-server: quay.io username: ${{ secrets.QUAYIO_USERNAME }} @@ -285,24 +239,25 @@ jobs: env: NODE_OPTIONS: --max-old-space-size=4096 steps: - - uses: actions/checkout@v3 - - uses: actions/setup-node@v3 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: 
actions/setup-node@b39b52d1213e96004bfcb1c61a8a6fa8ab84f3e8 # v4.0.1 with: - node-version: "20" - - uses: actions/setup-go@v3 + node-version: "20" # change in all GH Workflows + - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 with: go-version: "1.21" - - uses: actions/cache@v3 + - name: Restore node packages cache + uses: actions/cache@e12d46a63a90f2fae62d114769bbf2a179198b5c # v3.3.3 with: path: ui/node_modules key: ${{ runner.os }}-node-dep-v1-${{ hashFiles('**/yarn.lock') }} - name: go build cache - uses: actions/cache@v3 + uses: actions/cache@e12d46a63a90f2fae62d114769bbf2a179198b5c # v3.3.3 with: path: /home/runner/.cache/go-build key: GOCACHE-v2-${{ hashFiles('**/go.mod') }} - name: go mod cache - uses: actions/cache@v3 + uses: actions/cache@e12d46a63a90f2fae62d114769bbf2a179198b5c # v3.3.3 with: path: /home/runner/go/pkg/mod key: GOMODCACHE-v2-${{ hashFiles('**/go.mod') }} @@ -323,7 +278,7 @@ jobs: # If a conflict occurs (because you are not on a tag), the release will not be updated. This is a short coming # of this action. # Instead, delete the release so it is re-created. 
- - uses: softprops/action-gh-release@v1 + - uses: softprops/action-gh-release@de2c0eb89ae2a093876385947365aca7b0e5f844 # v1 if: startsWith(github.ref, 'refs/tags/v') with: prerelease: ${{ startsWith(github.ref, 'refs/tags/v0') || contains(github.ref, 'rc') }} diff --git a/.github/workflows/sdks.yaml b/.github/workflows/sdks.yaml index 38f9773bd82a..84454c2f868d 100644 --- a/.github/workflows/sdks.yaml +++ b/.github/workflows/sdks.yaml @@ -21,7 +21,7 @@ jobs: - java - python steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - run: make --directory sdks/${{matrix.name}} publish -B env: JAVA_SDK_MAVEN_PASSWORD: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/snyk.yml b/.github/workflows/snyk.yml index b078e70c15d7..279e54d593f0 100644 --- a/.github/workflows/snyk.yml +++ b/.github/workflows/snyk.yml @@ -20,9 +20,9 @@ jobs: env: SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Run Snyk to check for Go vulnerabilities - uses: snyk/actions/golang@master + uses: snyk/actions/golang@b98d498629f1c368650224d6d212bf7dfa89e4bf # v0.4.0 with: args: --severity-threshold=high @@ -33,15 +33,15 @@ jobs: env: SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} steps: - - uses: actions/checkout@v4 - - uses: actions/setup-node@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/setup-node@b39b52d1213e96004bfcb1c61a8a6fa8ab84f3e8 # v4.0.1 with: node-version: "20" # change in all GH Workflows cache: yarn cache-dependency-path: ui/yarn.lock - run: yarn --cwd ui install - name: Run Snyk to check for Node vulnerabilities - uses: snyk/actions/node@master + uses: snyk/actions/node@b98d498629f1c368650224d6d212bf7dfa89e4bf # v0.4.0 with: args: --file=ui/package.json --severity-threshold=high diff --git a/.spelling b/.spelling index eb1e2bc90fca..7ff7db1f308d 100644 --- 
a/.spelling +++ b/.spelling @@ -1,8 +1,4 @@ -# markdown-spellcheck spelling configuration file -# Format - lines beginning # are comments -# global dictionary is at the start, file overrides afterwards -# one word per line, to define a file override use ' - filename' -# where filename is relative to this configuration file +# markdown-spellcheck dictionary 000s 0s 100Mi @@ -35,9 +31,6 @@ ArgoLabs Artifactory BlackRock Breitgand -Codespaces -Couler -ClusterRoleBinding CRD CRDs CloudSQL @@ -51,30 +44,27 @@ CronWorkflow CronWorkflows DataDog Dataflow -DeleteObject DevOps +Devenv Dex EditorConfig EtcD EventRouter -FailFast -GSoC +Generator GitOps Github Golang -goroutine -goroutines Grafana Grammarly Hadoop Heptio Homebrew +IAM-based +IPs InitContainer InsideBoard Invocators -IAM-based Istio -J.P. Jemison JetBrains KNative @@ -85,10 +75,11 @@ Killercoda KubectlExec Kubeflow Kustomize +LDFlags Lifecycle-Hook LitmusChaos -metadata MLOps +Makefile MinIO Minikube MySQL @@ -99,46 +90,44 @@ Node.JS. OAuth OAuth2 Okta -parameterize -parameterized -parameterizing +OpenAPI PDBs PProf PVCs Peixuan Ploomber Postgres +RCs Roadmap RoleBinding -s3 SDKs +SageMaker ServiceAccount Sharding -shortcodes Singer.io Snyk Sumit Tekton -Tianchu Traefik +Triaging TripAdvisor +UI VSCode Valasek Webhooks Welch -`CronTab` -`OnFailure` +WorkflowTemplate +WorkflowTemplates a.m. -alexec anded apis architecting argo +argoproj args async auth backend -blkperl boolean booleans buildkit @@ -147,9 +136,11 @@ config cpu cron daemoned -dev-container -dinever +dependabot +dev +devenv dockershim +docs dropdown e.g. e2e @@ -160,10 +151,13 @@ errored expr fibonacci finalizer +gitops +goroutine +goroutines govaluate gzipped -Generator i.e. 
+idempotence instantiator instantiators jenkins @@ -171,22 +165,28 @@ k3d k3s k8s-jobs kube +kube-apiserver +kube-scheduler +kubectl kubelet kubernetes liveness localhost +maxFailures +maxSuccess memoization memoized memoizing -mentee -mentees +metadata minikube mutex namespace namespaces natively -OpenAPI +nix.conf +non-member p.m. +parameterizing params pprof pre-commit @@ -195,22 +195,19 @@ repo roadmap runtime runtimes +s3 sandboxed -sarabala1979 -simster7 +shortcodes stateful stderr -tczhao -terrytangyuan -themself +triaged un-reconciled -untracked v1 v1.0 v1.1 v1.2 -v1.3 v1.24 +v1.3 v2 v2.0 v2.1 @@ -237,23 +234,13 @@ v3.3. v3.4 v3.4. v3.5 +versioned validator +vendored +versioned versioning webHDFS webhook webhooks workflow-controller-configmap yaml -idempotence -kube-scheduler -kube-apiserver -kubectl -Makefile -Devenv -devenv -vendored -nix.conf -LDFlags -dev -vendorSha256 -dependabot diff --git a/CHANGELOG.md b/CHANGELOG.md index f8f10ff5bdb6..3bce5fa8130d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,68 @@ # Changelog +## v3.5.6 (2024-04-19) + +* [200f4d1e5](https://github.com/argoproj/argo-workflows/commit/200f4d1e5ffee0a57a9e7a9995b95da15230eb97) fix: don't load entire archived workflow into memory in list APIs (#12912) +* [fe5c6128c](https://github.com/argoproj/argo-workflows/commit/fe5c6128c6535a636995958c2b44c699c2540be5) fix(ui): default to `main` container name in event source logs API call (#12939) +* [06e6a0df7](https://github.com/argoproj/argo-workflows/commit/06e6a0df7b56b442e5b21071b2584cd593cea9d3) fix(build): close `pkg/apiclient/_.secondary.swagger.json` (#12942) +* [909fdaa98](https://github.com/argoproj/argo-workflows/commit/909fdaa987014e527fbb4f487bce283d682b9854) fix: correct order in artifactGC error log message (#12935) +* [ab7bee7b0](https://github.com/argoproj/argo-workflows/commit/ab7bee7b05fb61b293b89ad4f9f2b1a137b93e84) fix: workflows that are retrying should not be deleted (Fixes #12636) (#12905) +* 
[9c2581ad0](https://github.com/argoproj/argo-workflows/commit/9c2581ad0f0f83a6fd1754a9fdad9e846a9bc39f) fix: change fatal to panic. (#12931) +* [01f843828](https://github.com/argoproj/argo-workflows/commit/01f843828b92911581e90dcd3a7d0299a79add9c) fix: Correct log level for agent containers (#12929) +* [30f2e0d93](https://github.com/argoproj/argo-workflows/commit/30f2e0d93cbaaf06a64e70d9cde6648b2ce41f6b) fix: DAG with continueOn in error after retry. Fixes: #11395 (#12817) +* [1c1f43313](https://github.com/argoproj/argo-workflows/commit/1c1f43313578ece6648c1dd7c93d94596b7a4302) fix: use multipart upload method to put files larger than 5Gi to OSS. Fixes #12877 (#12897) +* [8c9a85761](https://github.com/argoproj/argo-workflows/commit/8c9a85761db22284b103f1d500cc9336e95b9766) fix: remove completed taskset status before update workflow. Fixes: #12832 (#12835) +* [ce7cad34b](https://github.com/argoproj/argo-workflows/commit/ce7cad34bca3540a196b56d9b4492bab6cd70d3a) fix: make sure Finalizers has chance to be removed. Fixes: #12836 (#12831) +* [5d03f838c](https://github.com/argoproj/argo-workflows/commit/5d03f838c418272be33eb0abc52d5fbbb271a6ff) fix(test): wait enough time to Trigger Running Hook. Fixes: #12844 (#12855) +* [3d0648893](https://github.com/argoproj/argo-workflows/commit/3d064889300bb323af1c81cc5bcf61c2a65ebcfa) fix: filter hook node to find the correct lastNode. Fixes: #12109 (#12815) +* [c9dd50d35](https://github.com/argoproj/argo-workflows/commit/c9dd50d35b87086421e0e24ccbb481591f6f9425) fix: terminate workflow should not get throttled Fixes #12778 (#12792) +* [faaddf3ac](https://github.com/argoproj/argo-workflows/commit/faaddf3acc2bc82b02600701af5076adebbdf0d2) fix(containerSet): mark container deleted when pod deleted. Fixes: #12210 (#12756) +* [4e7d471c0](https://github.com/argoproj/argo-workflows/commit/4e7d471c0d3ae856ff22056739147b52ea3ba5fc) fix: return itself when getOutboundNodes from memoization Hit steps/DAG. 
Fixes: #7873 (#12780) +* [519faf03c](https://github.com/argoproj/argo-workflows/commit/519faf03c6df81fa2c34269cb2a3a0fc119a433f) fix: pass dnsconfig to agent pod. Fixes: #12824 (#12825) +* [56d7b2b9c](https://github.com/argoproj/argo-workflows/commit/56d7b2b9c6844d7cb1e69d8711c9322221e2f911) fix: inline template loops should receive more than the first item. Fixes: #12594 (#12628) +* [19a7edebb](https://github.com/argoproj/argo-workflows/commit/19a7edebbb4524e409e0e9f4225f1bf6b0073312) fix: workflow stuck in running state when using activeDeadlineSeconds on template level. Fixes: #12329 (#12761) +* [68c089d49](https://github.com/argoproj/argo-workflows/commit/68c089d49346d72e16017353bcf54d32d1d8b165) fix: ensure workflowtaskresults complete before mark workflow completed status. Fixes: #12615 (#12574) +* [b189afa48](https://github.com/argoproj/argo-workflows/commit/b189afa48d2824cd419fe5db23c55e6204020e49) fix: patch report outputs completed if task result not exists. (#12748) +* [eec6ae0e4](https://github.com/argoproj/argo-workflows/commit/eec6ae0e4dcfd721f2f706e796279b378653438f) fix(log): change task set to task result. (#12749) +* [a20f69571](https://github.com/argoproj/argo-workflows/commit/a20f69571f4cef97b353f8b3a80cd1161b80274d) chore(deps): upgrade `mkdocs-material` from 8.2.6 to 9.x (#12894) +* [c956d70ee](https://github.com/argoproj/argo-workflows/commit/c956d70eead3cedf2f8c1422c028e26fe4b45683) fix(hack): various fixes & improvements to cherry-pick script (#12714) +* [1c09db42e](https://github.com/argoproj/argo-workflows/commit/1c09db42ec69540ec64e5dd60a6daef3473c6783) fix(deps): upgrade x/net to v0.23.0. Fixes CVE-2023-45288 (#12921) +* [1c3401dc6](https://github.com/argoproj/argo-workflows/commit/1c3401dc68236979fc26b35c787256fcb96a7d1f) fix(deps): upgrade `http2` to v0.24. 
Fixes CVE-2023-45288 (#12901) +* [ddf815fb2](https://github.com/argoproj/argo-workflows/commit/ddf815fb2885b7c207177e211349a6e1a169aec3) chore(deps): bump cloud.google.com/go/storage from 1.35.1 to 1.36.0 (#12378) +* [bc42b0881](https://github.com/argoproj/argo-workflows/commit/bc42b08812d193242522a14964829c7a1bf362a6) chore(deps): bump github.com/Azure/azure-sdk-for-go/sdk/azcore from 1.9.0 to 1.9.1 (#12376) +* [ec84a61c6](https://github.com/argoproj/argo-workflows/commit/ec84a61c6e337b012dcce1a21b7298d07ec3526e) chore(deps): bump github.com/Azure/azure-sdk-for-go/sdk/azcore from 1.8.0 to 1.9.0 (#12298) +* [0d4bb58de](https://github.com/argoproj/argo-workflows/commit/0d4bb58deca980458f9309315d01dc668149fd3b) docs(install): use material admonition instead of unicode caution (#12561) +* [d9f2184ac](https://github.com/argoproj/argo-workflows/commit/d9f2184acac1fd390f68f36aefc59706885003d8) build(docs): add script to sync docs/README.md with README.md (#12752) +* [a1643357c](https://github.com/argoproj/argo-workflows/commit/a1643357c235a84d6838331dc8df7c1d83d58abe) refactor(build): simplify `mkdocs build` scripts (#12463) +* [c8082b6fc](https://github.com/argoproj/argo-workflows/commit/c8082b6fc386408e73063d1ad0402510445fa94c) fix(deps): upgrade `crypto` from v0.20 to v0.22. 
Fixes CVE-2023-42818 (#12900) +* [4fb03eef9](https://github.com/argoproj/argo-workflows/commit/4fb03eef988d6d7824d6620fca5a75524039e2de) chore(deps): bump `undici` from 5.28.3 to 5.28.4 in /ui (#12891) +* [4ce9e02d3](https://github.com/argoproj/argo-workflows/commit/4ce9e02d382992855269b8381d6bcaec44bdd1cd) chore(deps): bump `follow-redirects` from 1.15.4 to 1.15.6 due to CVE +* [20c81f8a5](https://github.com/argoproj/argo-workflows/commit/20c81f8a522ac8c238b5ec5c35d5596688771643) build(deps): bump github.com/go-jose/go-jose/v3 from 3.0.1 to 3.0.3 (#12879) +* [ceef27bf2](https://github.com/argoproj/argo-workflows/commit/ceef27bf2bb7594ccdaca64c693cf3149baf2be3) build(deps): bump github.com/docker/docker from 24.0.0+incompatible to 24.0.9+incompatible (#12878) +* [8fcadffc1](https://github.com/argoproj/argo-workflows/commit/8fcadffc1cc25461c8ff6cf68f5430c8b494d726) fix(deps): upgrade `pgx` from 4.18.1 to 4.18.2 due to CVE (#12753) +* [43630bd8e](https://github.com/argoproj/argo-workflows/commit/43630bd8ec1207ee882295f47ba682aed8dde534) chore(deps): upgrade Cosign to v2.2.3 (#12850) +* [6d41e8cfa](https://github.com/argoproj/argo-workflows/commit/6d41e8cfa90940d570fe428e3e3fc039d77cd012) fix(deps): upgrade `undici` to 5.28.3 due to CVE (#12763) +* [1f39d328d](https://github.com/argoproj/argo-workflows/commit/1f39d328df494296ef929c6cdac7d5a344fbafe3) chore(deps): bump google.golang.org/protobuf to 1.33.0 to fix CVE-2024-24786 (#12846) +* [c353b0921](https://github.com/argoproj/argo-workflows/commit/c353b092198007f495ce14405fed25914a88a5b8) chore(deps): bump github.com/creack/pty from 1.1.20 to 1.1.21 (#12312) +* [d95791fdf](https://github.com/argoproj/argo-workflows/commit/d95791fdf94f728690e89284df4da7373af6012b) fix: mark task result completed use nodeId instead of podname. Fixes: #12733 (#12755) +* [03f9f7583](https://github.com/argoproj/argo-workflows/commit/03f9f75832dd3dc4aca14b7d40b7e8c22f4e26fd) fix(ui): show correct podGC message for deleteDelayDuration. 
Fixes: #12395 (#12784) + +### Contributors + +* AlbeeSo +* Andrei Shevchenko +* Anton Gilgur +* Jiacheng Xu +* Shiwei Tang +* Shunsuke Suzuki +* Tianchu Zhao +* Yuan Tang +* Yulin Li +* dependabot[bot] +* guangwu +* shuangkun tian +* static-moonlight + ## v3.5.5 (2024-02-29) * [6af917eb3](https://github.com/argoproj/argo-workflows/commit/6af917eb322bb84a2733723433a9eb87b7f1e85d) chore(deps): bump github.com/cloudflare/circl to 1.3.7 to fix GHSA-9763-4f94-gfch (#12556) @@ -693,6 +756,121 @@ * yeicandoit * younggil +## v3.4.17 (2024-05-12) + +* [72efa2f15](https://github.com/argoproj/argo-workflows/commit/72efa2f1509d55c8863cf806c2ad83adf0aea65a) chore(deps): bump github.com/cloudflare/circl to 1.3.7 to fix GHSA-9763-4f94-gfch (#12556) +* [0f71a40db](https://github.com/argoproj/argo-workflows/commit/0f71a40dbd35da3090c8fcbaa88299bcc6c6e037) chore(deps): fixed medium CVE in github.com/docker/docker v24.0.0+incompatible (#12635) +* [6030af483](https://github.com/argoproj/argo-workflows/commit/6030af483b34357b74b46f9760b24379cc2ea2bb) chore(deps): upgrade Cosign to v2.2.3 (#12850) +* [cc258b874](https://github.com/argoproj/argo-workflows/commit/cc258b874cf1fd6af30e8246497a2688be5cf0c5) build(deps): bump github.com/docker/docker from 24.0.0+incompatible to 24.0.9+incompatible (#12878) +* [7e7d99b67](https://github.com/argoproj/argo-workflows/commit/7e7d99b67bb0d237940a30583404eb4b039daea3) build(deps): bump github.com/go-jose/go-jose/v3 from 3.0.1 to 3.0.3 (#12879) +* [6bb096efb](https://github.com/argoproj/argo-workflows/commit/6bb096efb6d2d0b0f692e9ac22c0c795c9b3b67c) chore(deps): bump `express`, `follow-redirects`, and `webpack-dev-middleware` (#12880) +* [a38cab742](https://github.com/argoproj/argo-workflows/commit/a38cab742cb29e4ab97ad1c57325b0564b32f45e) chore(deps): bump `undici` from 5.28.3 to 5.28.4 in /ui (#12891) +* [ae8e2e526](https://github.com/argoproj/argo-workflows/commit/ae8e2e526d1e9fa0f47693eaa805938b2db57704) fix: run linter on docs +* 
[d08a1c2f2](https://github.com/argoproj/argo-workflows/commit/d08a1c2f2a1b536f17a606e2bfea1a92fc060636) fix: linted typescript files +* [bf0174dba](https://github.com/argoproj/argo-workflows/commit/bf0174dba83300dddcf8340492914c750c26efb2) fix: `insecureSkipVerify` for `GetUserInfoGroups` (#12982) +* [2df039b0b](https://github.com/argoproj/argo-workflows/commit/2df039b0b66abbe3b59f89d0879da2d4135bcaa8) fix(ui): default to `main` container name in event source logs API call (#12939) +* [0f3a00d7f](https://github.com/argoproj/argo-workflows/commit/0f3a00d7fa7fa37a3a56d1576ce441a3049303cf) fix(build): close `pkg/apiclient/_.secondary.swagger.json` (#12942) +* [f1af3263c](https://github.com/argoproj/argo-workflows/commit/f1af3263c97065b7fff32669a98e0a5ccb4b5726) fix: correct order in artifactGC error log message (#12935) +* [627069692](https://github.com/argoproj/argo-workflows/commit/6270696921d66831d639a8c911d56fcf2066eb2a) fix: workflows that are retrying should not be deleted (Fixes #12636) (#12905) +* [caa339be2](https://github.com/argoproj/argo-workflows/commit/caa339be2dd23654bf9a347810fac243185e7679) fix: change fatal to panic. (#12931) +* [fb08ad044](https://github.com/argoproj/argo-workflows/commit/fb08ad044ed9ed30b18de5de27a4ea12f49e7511) fix: Correct log level for agent containers (#12929) +* [30a756e9e](https://github.com/argoproj/argo-workflows/commit/30a756e9e3655bb7025cc1692136d5f93ed95033) fix(deps): upgrade x/net to v0.23.0. Fixes CVE-2023-45288 (#12921) +* [b0120579d](https://github.com/argoproj/argo-workflows/commit/b0120579dd06c4a351a32cedfe3ecdff16aae73e) fix(deps): upgrade `http2` to v0.24. Fixes CVE-2023-45288 (#12901) +* [de840948c](https://github.com/argoproj/argo-workflows/commit/de840948ce90687cf2b9a7820c2a6e3f5bee2823) fix(deps): upgrade `crypto` from v0.20 to v0.22. 
Fixes CVE-2023-42818 (#12900) +* [aa2bd8f3e](https://github.com/argoproj/argo-workflows/commit/aa2bd8f3ee2a5eee0c531a213b9975ca35f0f0dd) fix: use multipart upload method to put files larger than 5Gi to OSS. Fixes #12877 (#12897) +* [c5b4935fa](https://github.com/argoproj/argo-workflows/commit/c5b4935fab36ae12c3fcb66daf3a9b1f8c610723) fix: make sure Finalizers has chance to be removed. Fixes: #12836 (#12831) +* [774388a7b](https://github.com/argoproj/argo-workflows/commit/774388a7b0410ca5a94b799a5f7bfabc04333e3b) fix(test): wait enough time to Trigger Running Hook. Fixes: #12844 (#12855) +* [7821fdd0a](https://github.com/argoproj/argo-workflows/commit/7821fdd0a5dd36dfeadeeab9ebb7ba67c7d4d137) fix: terminate workflow should not get throttled Fixes #12778 (#12792) +* [e0c16ff0f](https://github.com/argoproj/argo-workflows/commit/e0c16ff0f52fb29138afb539d1a6b2f296d4ef32) fix: pass dnsconfig to agent pod. Fixes: #12824 (#12825) +* [82d14db2e](https://github.com/argoproj/argo-workflows/commit/82d14db2e50f7996f760772a7f538f1da2b93291) fix(deps): upgrade `undici` from 5.28.2 to 5.28.3 due to CVE (#12763) +* [9eb269d73](https://github.com/argoproj/argo-workflows/commit/9eb269d735fff855a6c20b46b396a8b4475a553a) fix(deps): upgrade `pgx` from 4.18.1 to 4.18.2 due to CVE (#12753) +* [6bd6a6373](https://github.com/argoproj/argo-workflows/commit/6bd6a63736a89edc36e4c0e07588e663fad08c4a) fix: inline template loops should receive more than the first item. Fixes: #12594 (#12628) +* [1f5bb49ce](https://github.com/argoproj/argo-workflows/commit/1f5bb49ce7f8209fbd108598edc9d58eae4a23e5) fix: workflow stuck in running state when using activeDeadlineSeconds on template level. Fixes: #12329 (#12761) +* [1a259cb11](https://github.com/argoproj/argo-workflows/commit/1a259cb11e059ff1ce1f0c1e29215ee8b913dc9e) fix(ui): show correct podGC message for deleteDelayDuration. 
Fixes: #12395 (#12784) +* [982038a88](https://github.com/argoproj/argo-workflows/commit/982038a88b764b497b4cf8a5e5934b6f4adaa517) fix(hack): various fixes & improvements to cherry-pick script (#12714) +* [c5ebbcf3a](https://github.com/argoproj/argo-workflows/commit/c5ebbcf3a11e44ddcdc4454dcfbeb74c17a9aee6) fix: make WF global parameters available in retries (#12698) +* [56ff88e02](https://github.com/argoproj/argo-workflows/commit/56ff88e02fd1e51a832c8ba95438d9b7284c98b7) fix: find correct retry node when using `templateRef`. Fixes: #12633 (#12683) +* [389492b4c](https://github.com/argoproj/argo-workflows/commit/389492b4cd95ca37edfc8a4b210b769e2c057a39) fix: Patch taskset with subresources to delete completed node status.… (#12620) +* [6194b8ada](https://github.com/argoproj/argo-workflows/commit/6194b8ada7ccf981084058c10dac411b44a695f9) fix(typo): fix some typo (#12673) +* [6cda00d2e](https://github.com/argoproj/argo-workflows/commit/6cda00d2e733ee40b2ae6d2c4f55ca50be72a8fd) fix(controller): re-allow changing executor `args` (#12609) +* [c590b2ef5](https://github.com/argoproj/argo-workflows/commit/c590b2ef564d25a7fef94803a0d03610a060dfec) fix(controller): add missing namespace index from workflow informer (#12666) +* [42ce47626](https://github.com/argoproj/argo-workflows/commit/42ce47626e669ace4011feb59f786c9d07561a39) fix: pass through burst and qps for auth.kubeclient (#12575) +* [4f8dd2ee7](https://github.com/argoproj/argo-workflows/commit/4f8dd2ee7d716ba2fc9e08edd013acb66bc9494c) fix: artifact subdir error when using volumeMount (#12638) +* [3cd016b00](https://github.com/argoproj/argo-workflows/commit/3cd016b004fbc57360b8b23989fc492ae7dd4313) fix: Allow valueFrom in dag arguments parameters. Fixes #11900 (#11902) +* [c15a75b00](https://github.com/argoproj/argo-workflows/commit/c15a75b0076a6a69be0d0f0efb4c6129d3732ec5) fix(resources): improve ressource accounting. 
Fixes #12468 (#12492) +* [83a49b4b9](https://github.com/argoproj/argo-workflows/commit/83a49b4b9638b160c9320cd0e808179c31482ee5) fix: upgrade expr-lang. Fixes #12037 (#12573) +* [bc7889be3](https://github.com/argoproj/argo-workflows/commit/bc7889be398378bd1875d8ae0532c437695652e2) fix: make etcd errors transient (#12567) +* [b9a22f876](https://github.com/argoproj/argo-workflows/commit/b9a22f8764e69c4feb6a18aab5ea55782180c282) fix: update minio chart repo (#12552) +* [574fd3ad2](https://github.com/argoproj/argo-workflows/commit/574fd3ad23d253d43757c47a6786350826c354e1) fix: add resource quota evaluation timed out to transient (#12536) +* [93e981d78](https://github.com/argoproj/argo-workflows/commit/93e981d78bc32a2ac599c63927ed3116b9cb51f8) fix: prevent update race in workflow cache (Fixes #9574) (#12233) +* [5f4845dbc](https://github.com/argoproj/argo-workflows/commit/5f4845dbc1415e1d0875f0361d8b7225086666d0) fix: Fixed mutex with withSequence in http template broken. Fixes #12018 (#12176) +* [790c0a4d1](https://github.com/argoproj/argo-workflows/commit/790c0a4d14b821af9942a590239ece9f7c30f18d) fix: SSO with Jumpcloud "email_verified" field #12257 (#12318) +* [e1bb99c3c](https://github.com/argoproj/argo-workflows/commit/e1bb99c3c33263d183423ce230e23d803c5fef5f) fix: wrong values are assigned to input parameters of workflowtemplat… (#12412) +* [c9ad89985](https://github.com/argoproj/argo-workflows/commit/c9ad899856529946087ab58fee949af144221657) fix: http template host header rewrite(#12385) (#12386) +* [e6ea4b147](https://github.com/argoproj/argo-workflows/commit/e6ea4b147d761c6118febaabd0f9e05e427185d3) fix: ensure workflow wait for onExit hook for DAG template (#11880) (#12436) +* [7db24e009](https://github.com/argoproj/argo-workflows/commit/7db24e009c0621c95a8e59cf54263df694252255) fix: move log with potential sensitive data to debug loglevel. 
Fixes: #12366 (#12368) +* [9540f8e0f](https://github.com/argoproj/argo-workflows/commit/9540f8e0f982052584c0080d04ba967703ec3485) fix: resolve output artifact of steps from expression when it refers … (#12320) +* [adf368514](https://github.com/argoproj/argo-workflows/commit/adf368514563d446c5ce8a729caec77320cf2862) fix: delete pending pod when workflow terminated (#12196) +* [fedfb3790](https://github.com/argoproj/argo-workflows/commit/fedfb3790ad052587b39fa03fee6daf2f15876ea) fix: create dir when input path is not exist in oss (#12323) +* [a68e1f053](https://github.com/argoproj/argo-workflows/commit/a68e1f0530ff1b0fd688a1d05c1d8d126ba3bd79) fix: return failed instead of success when no container status (#12197) +* [eb9bbc8aa](https://github.com/argoproj/argo-workflows/commit/eb9bbc8aac953978371feca37605803bf654f49a) fix: Changes to workflow semaphore does work #12194 (#12284) +* [731366411](https://github.com/argoproj/argo-workflows/commit/731366411a630a7565c3703956b18395a4fc78fd) fix: properly resolve exit handler inputs (fixes #12283) (#12288) +* [58418906f](https://github.com/argoproj/argo-workflows/commit/58418906f2e8406d2e49f59545b49cb10c9d32b4) fix: Add identifiable user agent in API client. Fixes #11996 (#12276) +* [d6c5ed078](https://github.com/argoproj/argo-workflows/commit/d6c5ed078fbd9b9c21cebb97e27391529c7629fa) fix: remove deprecated function rand.Seed (#12271) +* [732b94a73](https://github.com/argoproj/argo-workflows/commit/732b94a73bf7bdb23ba27af5feb568383d0079a1) fix: leak stream (#12193) +* [6daa22b08](https://github.com/argoproj/argo-workflows/commit/6daa22b085625c23f47c34125257578c1ed74051) fix(server): allow passing loglevels as env vars to Server (#12145) +* [e8e9c2a48](https://github.com/argoproj/argo-workflows/commit/e8e9c2a48197c45dc6481f2637694ab524e458c4) fix: retry S3 on RequestError. 
Fixes #9914 (#12191) +* [18685ad8d](https://github.com/argoproj/argo-workflows/commit/18685ad8da825b9ccd660386fbba078edb9eb211) fix: Fix the Maximum Recursion Depth prompt link in the CLI. (#12015) +* [88d4e0f14](https://github.com/argoproj/argo-workflows/commit/88d4e0f14e85c7fbf4095536361e609ea08b4e77) fix: Only append slash when missing for Artifactory repoURL (#11812) +* [4627aa047](https://github.com/argoproj/argo-workflows/commit/4627aa047f9631babcabf093c8fc9de6a09dab21) fix: upgrade module for pull image in google cloud issue #9630 (#11614) +* [2368b37e6](https://github.com/argoproj/argo-workflows/commit/2368b37e6b773dacd52e8c8a3393af4747ac62d2) fix: Upgrade Go to v1.21 Fixes #11556 (#11601) +* [63af1c414](https://github.com/argoproj/argo-workflows/commit/63af1c414630ca263e55f221555e308921406cd7) fix(ui): ensure `package.json#name` is not the same as `argo-ui` (#11595) +* [c9f96f446](https://github.com/argoproj/argo-workflows/commit/c9f96f44693392ee82134da51324525e37802d52) fix: Devcontainer resets /etc/hosts (#11439) (#11440) +* [b23713e4b](https://github.com/argoproj/argo-workflows/commit/b23713e4b3db4ff847efd20a0765c88c1c22eb23) fix: make archived logs more human friendly in UI (#11420) +* [660bbb68f](https://github.com/argoproj/argo-workflows/commit/660bbb68f2e878700cb256898c68c75f00ee99d1) fix: Live workflow takes precedence during merge to correctly display in the UI (#11336) +* [a4ca4d27e](https://github.com/argoproj/argo-workflows/commit/a4ca4d27e92b83b52b3f79b850524f65b9b4a795) fix: add space to fix release action issue (#11160) +* [5fe8b37a6](https://github.com/argoproj/argo-workflows/commit/5fe8b37a63bcf03051c6c3fbe01580c344eda07d) fix: upgrade `argo-ui` components to latest (3.4 backport) (#12998) + +### Contributors + +* Alan Clucas +* AlbeeSo +* AloysAqemia +* Andrei Shevchenko +* Anton Gilgur +* Bryce-Huang +* Byeonggon Lee +* Dennis Lawler +* Denys Melnyk +* Eduardo Rodrigues +* Helge Willum Thingvad +* Isitha Subasinghe +* João Pedro +* 
Raffael +* Ruin09 +* Ryan Currah +* Shiwei Tang +* Shunsuke Suzuki +* Son Bui +* Tal Yitzhak +* Tianchu Zhao +* Weidong Cai +* Yang Lu +* Yuan (Terry) Tang +* Yuan Tang +* Yulin Li +* dependabot[bot] +* guangwu +* gussan +* ivancili +* jiangjiang +* jswxstw +* junkmm +* neosu +* shuangkun tian +* static-moonlight +* sycured + ## v3.4.16 (2024-01-14) * [910a9aabc](https://github.com/argoproj/argo-workflows/commit/910a9aabce5de6568b54350c181a431f8263605a) fix: Fix lint build diff --git a/Dockerfile b/Dockerfile index 8bf862bed9a1..e4f39f0bb82c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -71,6 +71,8 @@ ARG GIT_TREE_STATE RUN mkdir -p ui/dist COPY --from=argo-ui ui/dist/app ui/dist/app +# update timestamp so that `make` doesn't try to rebuild this -- it was already built in the previous stage +RUN touch ui/dist/app/index.html RUN --mount=type=cache,target=/go/pkg/mod --mount=type=cache,target=/root/.cache/go-build STATIC_FILES=true make dist/argo GIT_COMMIT=${GIT_COMMIT} GIT_TAG=${GIT_TAG} GIT_TREE_STATE=${GIT_TREE_STATE} diff --git a/Makefile b/Makefile index a3d3cb7e19ba..33e2c947d795 100644 --- a/Makefile +++ b/Makefile @@ -685,6 +685,8 @@ endif docs-spellcheck: /usr/local/bin/mdspell # check docs for spelling mistakes mdspell --ignore-numbers --ignore-acronyms --en-us --no-suggestions --report $(shell find docs -name '*.md' -not -name upgrading.md -not -name README.md -not -name fields.md -not -name upgrading.md -not -name swagger.md -not -name executor_swagger.md -not -path '*/cli/*') + # alphabetize spelling file -- ignore first line (comment), then sort the rest case-sensitive and remove duplicates + $(shell cat .spelling | awk 'NR<2{ print $0; next } { print $0 | "sort" }' | uniq | tee .spelling > /dev/null) /usr/local/bin/markdown-link-check: # update this in Nix when upgrading it here diff --git a/OWNERS b/OWNERS index 93da78fc825e..87dd83f32899 100644 --- a/OWNERS +++ b/OWNERS @@ -3,14 +3,15 @@ owners: - terrytangyuan approvers: +- agilgur5 - alexec - alexmt 
- edlee2121 +- isubasinghe - jessesuen +- joibel - juliev0 +- tczhao reviewers: -- agilgur5 -- isubasinghe -- joibel -- tczhao +- shuangkun diff --git a/README.md b/README.md index b50e516148a1..9bc8242e2f41 100644 --- a/README.md +++ b/README.md @@ -1,20 +1,22 @@ -[![slack](https://img.shields.io/badge/slack-argoproj-brightgreen.svg?logo=slack)](https://argoproj.github.io/community/join-slack) -[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/3830/badge)](https://bestpractices.coreinfrastructure.org/projects/3830) +[![Security Status](https://github.com/argoproj/argo-workflows/workflows/Snyk/badge.svg)](https://github.com/argoproj/argo-workflows/actions/workflows/snyk.yml?query=branch%3Amain) +[![OpenSSF Best Practices](https://bestpractices.coreinfrastructure.org/projects/3830/badge)](https://bestpractices.coreinfrastructure.org/projects/3830) [![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/argoproj/argo-workflows/badge)](https://api.securityscorecards.dev/projects/github.com/argoproj/argo-workflows) -[![Artifact HUB](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/argo-workflows)](https://artifacthub.io/packages/helm/argo/argo-workflows) +[![FOSSA License Status](https://app.fossa.com/api/projects/git%2Bgithub.com%2Fargoproj%2Fargo-workflows.svg?type=shield)](https://app.fossa.com/projects/git%2Bgithub.com%2Fargoproj%2Fargo-workflows?ref=badge_shield) +[![Slack](https://img.shields.io/badge/slack-argoproj-brightgreen.svg?logo=slack)](https://argoproj.github.io/community/join-slack) [![Twitter Follow](https://img.shields.io/twitter/follow/argoproj?style=social)](https://twitter.com/argoproj) +[![LinkedIn](https://img.shields.io/badge/LinkedIn-argoproj-blue.svg?logo=linkedin)](https://www.linkedin.com/company/argoproj/) +[![Release 
Version](https://img.shields.io/github/v/release/argoproj/argo-workflows?label=argo-workflows)](https://github.com/argoproj/argo-workflows/releases/latest) +[![Artifact HUB](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/argo-workflows)](https://artifacthub.io/packages/helm/argo/argo-workflows) ## What is Argo Workflows? -Argo Workflows is an open source container-native workflow engine for orchestrating parallel jobs on Kubernetes. Argo -Workflows is implemented as a Kubernetes CRD (Custom Resource Definition). +Argo Workflows is an open source container-native workflow engine for orchestrating parallel jobs on Kubernetes. +Argo Workflows is implemented as a Kubernetes CRD (Custom Resource Definition). -* Define workflows where each step in the workflow is a container. -* Model multi-step workflows as a sequence of tasks or capture the dependencies between tasks using a directed acyclic - graph (DAG). -* Easily run compute intensive jobs for machine learning or data processing in a fraction of the time using Argo - Workflows on Kubernetes. +* Define workflows where each step is a container. +* Model multi-step workflows as a sequence of tasks or capture the dependencies between tasks using a directed acyclic graph (DAG). +* Easily run compute intensive jobs for machine learning or data processing in a fraction of the time using Argo Workflows on Kubernetes. Argo is a [Cloud Native Computing Foundation (CNCF)](https://cncf.io/) graduated project. @@ -30,15 +32,17 @@ Argo is a [Cloud Native Computing Foundation (CNCF)](https://cncf.io/) graduated * Argo Workflows is the most popular workflow execution engine for Kubernetes. * Light-weight, scalable, and easier to use. -* Designed from the ground up for containers without the overhead and limitations of legacy VM and server-based - environments. +* Designed from the ground up for containers without the overhead and limitations of legacy VM and server-based environments. 
* Cloud agnostic and can run on any Kubernetes cluster. [Read what people said in our latest survey](https://blog.argoproj.io/argo-workflows-events-2023-user-survey-results-82c53bc30543) ## Try Argo Workflows -[Access the demo environment](https://workflows.apps.argoproj.io/workflows/argo) (login using Github) +You can try Argo Workflows via one of the following: + +1. [Interactive Training Material](https://killercoda.com/argoproj/course/argo-workflows/) +1. [Access the demo environment](https://workflows.apps.argoproj.io/workflows/argo) ![Screenshot](docs/assets/screenshot.png) @@ -57,9 +61,9 @@ Just some of the projects that use or rely on Argo Workflows (complete list [her * [Kedro](https://kedro.readthedocs.io/en/stable/) * [Kubeflow Pipelines](https://github.com/kubeflow/pipelines) * [Netflix Metaflow](https://metaflow.org) -* [Onepanel](https://www.onepanel.ai/) +* [Onepanel](https://github.com/onepanelio/onepanel) * [Orchest](https://github.com/orchest/orchest/) -* [Piper](https://github.com/rookout/piper) +* [Piper](https://github.com/quickube/piper) * [Ploomber](https://github.com/ploomber/ploomber) * [Seldon](https://github.com/SeldonIO/seldon-core) * [SQLFlow](https://github.com/sql-machine-learning/sqlflow) @@ -123,12 +127,10 @@ An incomplete list of features Argo Workflows provide: ## Community Meetings -We host monthly community meetings where we and the community showcase demos and discuss the current and future state of -the project. Feel free to join us! For Community Meeting information, minutes and recordings -please [see here](https://bit.ly/argo-wf-cmty-mtng). +We host monthly community meetings where we and the community showcase demos and discuss the current and future state of the project. Feel free to join us! +For Community Meeting information, minutes and recordings, please [see here](https://bit.ly/argo-wf-cmty-mtng). 
-Participation in the Argo Workflows project is governed by -the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md) +Participation in Argo Workflows is governed by the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md) ## Community Blogs and Presentations diff --git a/SECURITY.md b/SECURITY.md index 02136b30d74c..96d668b849eb 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -6,9 +6,10 @@ If you find a security related bug in Argo Workflows, we kindly ask you for resp disclosure and for giving us appropriate time to react, analyze and develop a fix to mitigate the found security vulnerability. -Please report vulnerabilities by e-mail to the following address: +Please report vulnerabilities by: -* cncf-argo-security@lists.cncf.io +* Opening a draft GitHub Security Advisory: https://github.com/argoproj/argo-workflows/security/advisories/new +* Sending an e-mail to the following address: cncf-argo-security@lists.cncf.io All vulnerabilities and associated information will be treated with full confidentiality. diff --git a/USERS.md b/USERS.md index 56a972a5a7ee..691559a1c31b 100644 --- a/USERS.md +++ b/USERS.md @@ -49,6 +49,7 @@ Currently, the following organizations are **officially** using Argo Workflows: 1. [CCRi](https://www.ccri.com/) 1. [Cisco](https://www.cisco.com/) 1. [Cloud Scale](https://cloudscaleinc.com/) +1. [CloudGeometry](https://www.cloudgeometry.io/) 1. [CloudSeeds](https://www.cloudseeds.de/) 1. [Codec](https://www.codec.ai/) 1. [Codefresh](https://www.codefresh.io/) @@ -72,6 +73,7 @@ Currently, the following organizations are **officially** using Argo Workflows: 1. [DevSamurai](https://www.devsamurai.com/) 1. [Devtron Labs](https://github.com/devtron-labs/devtron) 1. [DLR](https://www.dlr.de/eoc/) +1. [DP Technology](https://www.dp.tech/) 1. [Dyno Therapeutics](https://dynotx.com) 1. [EBSCO Information Services](https://www.ebsco.com/) 1. 
[Enso Finance](https://enso.finance/) @@ -140,6 +142,7 @@ Currently, the following organizations are **officially** using Argo Workflows: 1. [Orchest](https://www.orchest.io/) 1. [OVH](https://www.ovh.com/) 1. [PathAI](https://www.pathai.com) +1. [PayIt](https://payitgov.com/) 1. [PDOK](https://www.pdok.nl/) 1. [Peak AI](https://www.peak.ai/) 1. [Phrase](https://phrase.com) @@ -170,11 +173,13 @@ Currently, the following organizations are **officially** using Argo Workflows: 1. [Schlumberger](https://slb.com/) 1. [Securitas](https://securitas.com/) 1. [SegmentStream](https://segmentstream.com) +1. [Semgrep](https://semgrep.com) 1. [Sendible](https://sendible.com) 1. [Sidecar Technologies](https://hello.getsidecar.com/) 1. [smallcase](https://smallcase.com/) 1. [Softonic](https://hello.softonic.com/) 1. [Sohu](https://www.sohu.com/) +1. [SternumIOT](https://www.sternumiot.com) 1. [Stillwater Supercomputing, Inc](http://www.stillwater-sc.com/) 1. [StreamNative](https://streamnative.io) 1. [strongDM](https://www.strongdm.com/) diff --git a/api/jsonschema/schema.json b/api/jsonschema/schema.json index ff8a6e4b04a2..f55f5a95864f 100644 --- a/api/jsonschema/schema.json +++ b/api/jsonschema/schema.json @@ -4345,6 +4345,7 @@ "type": "object" }, "io.argoproj.workflow.v1alpha1.ContainerSetRetryStrategy": { + "description": "ContainerSetRetryStrategy provides controls on how to retry a container set", "properties": { "duration": { "description": "Duration is the time between each retry, examples values are \"300ms\", \"1s\" or \"5m\". Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\".", @@ -4352,7 +4353,7 @@ }, "retries": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.util.intstr.IntOrString", - "description": "Nbr of retries" + "description": "Retries is the maximum number of retry attempts for each container. It does not include the first, original attempt; the maximum number of total attempts will be `retries + 1`." 
} }, "required": [ @@ -4370,7 +4371,7 @@ }, "retryStrategy": { "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ContainerSetRetryStrategy", - "description": "RetryStrategy describes how to retry a container nodes in the container set if it fails. Nbr of retries(default 0) and sleep duration between retries(default 0s, instant retry) can be set." + "description": "RetryStrategy describes how to retry container nodes if the container set fails. Note that this works differently from the template-level `retryStrategy` as it is a process-level retry that does not create new Pods or containers." }, "volumeMounts": { "items": { diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 773cc59c8228..cf75694ae9db 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -8378,6 +8378,7 @@ } }, "io.argoproj.workflow.v1alpha1.ContainerSetRetryStrategy": { + "description": "ContainerSetRetryStrategy provides controls on how to retry a container set", "type": "object", "required": [ "retries" @@ -8388,7 +8389,7 @@ "type": "string" }, "retries": { - "description": "Nbr of retries", + "description": "Retries is the maximum number of retry attempts for each container. It does not include the first, original attempt; the maximum number of total attempts will be `retries + 1`.", "$ref": "#/definitions/io.k8s.apimachinery.pkg.util.intstr.IntOrString" } } @@ -8406,7 +8407,7 @@ } }, "retryStrategy": { - "description": "RetryStrategy describes how to retry a container nodes in the container set if it fails. Nbr of retries(default 0) and sleep duration between retries(default 0s, instant retry) can be set.", + "description": "RetryStrategy describes how to retry container nodes if the container set fails. 
Note that this works differently from the template-level `retryStrategy` as it is a process-level retry that does not create new Pods or containers.", "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ContainerSetRetryStrategy" }, "volumeMounts": { diff --git a/cmd/argo/commands/clustertemplate/create.go b/cmd/argo/commands/clustertemplate/create.go index ea7ea4acd330..5e739a22041d 100644 --- a/cmd/argo/commands/clustertemplate/create.go +++ b/cmd/argo/commands/clustertemplate/create.go @@ -25,6 +25,16 @@ func NewCreateCommand() *cobra.Command { command := &cobra.Command{ Use: "create FILE1 FILE2...", Short: "create a cluster workflow template", + Example: `# Create a Cluster Workflow Template: + argo cluster-template create FILE1 + +# Create a Cluster Workflow Template and print it as YAML: + argo cluster-template create FILE1 --output yaml + +# Create a Cluster Workflow Template with relaxed validation: + argo cluster-template create FILE1 --strict false +`, + Run: func(cmd *cobra.Command, args []string) { if len(args) == 0 { cmd.HelpFunc()(cmd, args) diff --git a/cmd/argo/commands/clustertemplate/list.go b/cmd/argo/commands/clustertemplate/list.go index d26fd1243853..d720f58b9fb7 100644 --- a/cmd/argo/commands/clustertemplate/list.go +++ b/cmd/argo/commands/clustertemplate/list.go @@ -22,6 +22,15 @@ func NewListCommand() *cobra.Command { command := &cobra.Command{ Use: "list", Short: "list cluster workflow templates", + Example: `# List Cluster Workflow Templates: + argo cluster-template list + +# List Cluster Workflow Templates with additional details such as labels, annotations, and status: + argo cluster-template list --output wide + +# List Cluster Workflow Templates by name only: + argo cluster-template list -o name +`, Run: func(cmd *cobra.Command, args []string) { ctx, apiClient := client.NewAPIClient(cmd.Context()) serviceClient, err := apiClient.NewClusterWorkflowTemplateServiceClient() diff --git a/cmd/argo/commands/cp.go b/cmd/argo/commands/cp.go 
index d175a032b285..36ca1230590a 100644 --- a/cmd/argo/commands/cp.go +++ b/cmd/argo/commands/cp.go @@ -17,6 +17,7 @@ import ( "github.com/argoproj/argo-workflows/v3/pkg/apiclient" workflowpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflow" "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" + wfutil "github.com/argoproj/argo-workflows/v3/workflow/util" ) func NewCpCommand() *cobra.Command { @@ -82,7 +83,7 @@ func NewCpCommand() *cobra.Command { if nodeInfo == nil { return fmt.Errorf("could not get node status for node ID %s", artifact.NodeID) } - customPath = strings.Replace(customPath, "{templateName}", nodeInfo.TemplateName, 1) + customPath = strings.Replace(customPath, "{templateName}", wfutil.GetTemplateFromNode(*nodeInfo), 1) customPath = strings.Replace(customPath, "{namespace}", namespace, 1) customPath = strings.Replace(customPath, "{workflowName}", workflowName, 1) customPath = strings.Replace(customPath, "{nodeId}", artifact.NodeID, 1) diff --git a/cmd/argo/commands/list.go b/cmd/argo/commands/list.go index ae513619b156..189797238d42 100644 --- a/cmd/argo/commands/list.go +++ b/cmd/argo/commands/list.go @@ -64,6 +64,34 @@ func NewListCommand() *cobra.Command { command := &cobra.Command{ Use: "list", Short: "list workflows", + Example: `# List all workflows: + argo list + +# List all workflows from all namespaces: + argo list -A + +# List all running workflows: + argo list --running + +# List all completed workflows: + argo list --completed + + # List workflows created within the last 10m: + argo list --since 10m + +# List workflows that finished more than 2h ago: + argo list --older 2h + +# List workflows with more information (such as parameters): + argo list -o wide + +# List workflows in YAML format: + argo list -o yaml + +# List workflows that have both labels: + argo list -l label1=value1,label2=value2 +`, + Run: func(cmd *cobra.Command, args []string) { ctx, apiClient := client.NewAPIClient(cmd.Context()) serviceClient 
:= apiClient.NewWorkflowServiceClient() diff --git a/cmd/argo/commands/root.go b/cmd/argo/commands/root.go index 4571545b0afc..687b61f85702 100644 --- a/cmd/argo/commands/root.go +++ b/cmd/argo/commands/root.go @@ -34,7 +34,7 @@ func NewCommand() *cobra.Command { Long: ` You can use the CLI in the following modes: -# Kubernetes API Mode (default) +#### Kubernetes API Mode (default) Requests are sent directly to the Kubernetes API. No Argo Server is needed. Large workflows and the workflow archive are not supported. @@ -44,9 +44,9 @@ If you're using instance ID (which is very unlikely), you'll need to set it: ARGO_INSTANCEID=your-instanceid -# Argo Server GRPC Mode +#### Argo Server GRPC Mode -Requests are sent to the Argo Server API via GRPC (using HTTP/2). Large workflows and the workflow archive are supported. Network load-balancers that do not support HTTP/2 are not supported. +Requests are sent to the Argo Server API via GRPC (using HTTP/2). Large workflows and the workflow archive are supported. Network load-balancers that do not support HTTP/2 are not supported. Use if you do not have access to the Kubernetes API (e.g. you're in another cluster), and you're running the Argo Server using a network load-balancer that support HTTP/2. @@ -67,14 +67,14 @@ By default, the CLI uses your KUBECONFIG to determine default for ARGO_TOKEN and KUBECONFIG=/dev/null You will then need to set: - - ARGO_NAMESPACE=argo + + ARGO_NAMESPACE=argo And: - ARGO_TOKEN='Bearer ******' ;# Should always start with "Bearer " or "Basic ". + ARGO_TOKEN='Bearer ******' ;# Should always start with "Bearer " or "Basic ". -# Argo Server HTTP1 Mode +#### Argo Server HTTP1 Mode As per GRPC mode, but uses HTTP. Can be used with ALB that does not support HTTP/2. The command "argo logs --since-time=2020...." will not work (due to time-type). 
diff --git a/cmd/argo/commands/stop.go b/cmd/argo/commands/stop.go index 43a89b0782f3..98cff94dfd18 100644 --- a/cmd/argo/commands/stop.go +++ b/cmd/argo/commands/stop.go @@ -72,7 +72,7 @@ func NewStopCommand() *cobra.Command { command.Flags().StringVar(&stopArgs.nodeFieldSelector, "node-field-selector", "", "selector of node to stop, eg: --node-field-selector inputs.paramaters.myparam.value=abc") command.Flags().StringVarP(&stopArgs.labelSelector, "selector", "l", "", "Selector (label query) to filter on, not including uninitialized ones, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)") command.Flags().StringVar(&stopArgs.fieldSelector, "field-selector", "", "Selector (field query) to filter on, supports '=', '==', and '!='.(e.g. --field-selector key1=value1,key2=value2). The server only supports a limited number of field queries per type.") - command.Flags().BoolVar(&stopArgs.dryRun, "dry-run", false, "If true, only stop the workflows that would be stopped, without stopping them.") + command.Flags().BoolVar(&stopArgs.dryRun, "dry-run", false, "If true, only print the workflows that would be stopped, without stopping them.") return command } diff --git a/cmd/argo/commands/submit.go b/cmd/argo/commands/submit.go index ef5da1421434..ad097616ee29 100644 --- a/cmd/argo/commands/submit.go +++ b/cmd/argo/commands/submit.go @@ -51,6 +51,10 @@ func NewSubmitCommand() *cobra.Command { # Submit a single workflow from an existing resource argo submit --from cronwf/my-cron-wf + +# Submit multiple workflows from stdin: + + cat my-wf.yaml | argo submit - `, Run: func(cmd *cobra.Command, args []string) { if cmd.Flag("priority").Changed { diff --git a/docs/README.md b/docs/README.md index b0b54c1d7c7d..f6f2766316b7 100644 --- a/docs/README.md +++ b/docs/README.md @@ -1,20 +1,22 @@ -[![slack](https://img.shields.io/badge/slack-argoproj-brightgreen.svg?logo=slack)](https://argoproj.github.io/community/join-slack) -[![CII Best 
Practices](https://bestpractices.coreinfrastructure.org/projects/3830/badge)](https://bestpractices.coreinfrastructure.org/projects/3830) +[![Security Status](https://github.com/argoproj/argo-workflows/workflows/Snyk/badge.svg)](https://github.com/argoproj/argo-workflows/actions/workflows/snyk.yml?query=branch%3Amain) +[![OpenSSF Best Practices](https://bestpractices.coreinfrastructure.org/projects/3830/badge)](https://bestpractices.coreinfrastructure.org/projects/3830) [![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/argoproj/argo-workflows/badge)](https://api.securityscorecards.dev/projects/github.com/argoproj/argo-workflows) -[![Artifact HUB](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/argo-workflows)](https://artifacthub.io/packages/helm/argo/argo-workflows) +[![FOSSA License Status](https://app.fossa.com/api/projects/git%2Bgithub.com%2Fargoproj%2Fargo-workflows.svg?type=shield)](https://app.fossa.com/projects/git%2Bgithub.com%2Fargoproj%2Fargo-workflows?ref=badge_shield) +[![Slack](https://img.shields.io/badge/slack-argoproj-brightgreen.svg?logo=slack)](https://argoproj.github.io/community/join-slack) [![Twitter Follow](https://img.shields.io/twitter/follow/argoproj?style=social)](https://twitter.com/argoproj) +[![LinkedIn](https://img.shields.io/badge/LinkedIn-argoproj-blue.svg?logo=linkedin)](https://www.linkedin.com/company/argoproj/) +[![Release Version](https://img.shields.io/github/v/release/argoproj/argo-workflows?label=argo-workflows)](https://github.com/argoproj/argo-workflows/releases/latest) +[![Artifact HUB](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/argo-workflows)](https://artifacthub.io/packages/helm/argo/argo-workflows) ## What is Argo Workflows? -Argo Workflows is an open source container-native workflow engine for orchestrating parallel jobs on Kubernetes. Argo -Workflows is implemented as a Kubernetes CRD (Custom Resource Definition). 
+Argo Workflows is an open source container-native workflow engine for orchestrating parallel jobs on Kubernetes. +Argo Workflows is implemented as a Kubernetes CRD (Custom Resource Definition). -* Define workflows where each step in the workflow is a container. -* Model multi-step workflows as a sequence of tasks or capture the dependencies between tasks using a directed acyclic - graph (DAG). -* Easily run compute intensive jobs for machine learning or data processing in a fraction of the time using Argo - Workflows on Kubernetes. +* Define workflows where each step is a container. +* Model multi-step workflows as a sequence of tasks or capture the dependencies between tasks using a directed acyclic graph (DAG). +* Easily run compute intensive jobs for machine learning or data processing in a fraction of the time using Argo Workflows on Kubernetes. Argo is a [Cloud Native Computing Foundation (CNCF)](https://cncf.io/) graduated project. @@ -30,15 +32,17 @@ Argo is a [Cloud Native Computing Foundation (CNCF)](https://cncf.io/) graduated * Argo Workflows is the most popular workflow execution engine for Kubernetes. * Light-weight, scalable, and easier to use. -* Designed from the ground up for containers without the overhead and limitations of legacy VM and server-based - environments. +* Designed from the ground up for containers without the overhead and limitations of legacy VM and server-based environments. * Cloud agnostic and can run on any Kubernetes cluster. [Read what people said in our latest survey](https://blog.argoproj.io/argo-workflows-events-2023-user-survey-results-82c53bc30543) ## Try Argo Workflows -[Access the demo environment](https://workflows.apps.argoproj.io/workflows/argo) (login using Github) +You can try Argo Workflows via one of the following: + +1. [Interactive Training Material](https://killercoda.com/argoproj/course/argo-workflows/) +1. 
[Access the demo environment](https://workflows.apps.argoproj.io/workflows/argo) ![Screenshot](assets/screenshot.png) @@ -57,9 +61,9 @@ Just some of the projects that use or rely on Argo Workflows (complete list [her * [Kedro](https://kedro.readthedocs.io/en/stable/) * [Kubeflow Pipelines](https://github.com/kubeflow/pipelines) * [Netflix Metaflow](https://metaflow.org) -* [Onepanel](https://www.onepanel.ai/) +* [Onepanel](https://github.com/onepanelio/onepanel) * [Orchest](https://github.com/orchest/orchest/) -* [Piper](https://github.com/rookout/piper) +* [Piper](https://github.com/quickube/piper) * [Ploomber](https://github.com/ploomber/ploomber) * [Seldon](https://github.com/SeldonIO/seldon-core) * [SQLFlow](https://github.com/sql-machine-learning/sqlflow) @@ -123,12 +127,10 @@ An incomplete list of features Argo Workflows provide: ## Community Meetings -We host monthly community meetings where we and the community showcase demos and discuss the current and future state of -the project. Feel free to join us! For Community Meeting information, minutes and recordings -please [see here](https://bit.ly/argo-wf-cmty-mtng). +We host monthly community meetings where we and the community showcase demos and discuss the current and future state of the project. Feel free to join us! +For Community Meeting information, minutes and recordings, please [see here](https://bit.ly/argo-wf-cmty-mtng). 
-Participation in the Argo Workflows project is governed by -the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md) +Participation in Argo Workflows is governed by the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md) ## Community Blogs and Presentations diff --git a/docs/argo-server-sso-argocd.md b/docs/argo-server-sso-argocd.md index 724f2c8d7fc9..e27a50897ab1 100644 --- a/docs/argo-server-sso-argocd.md +++ b/docs/argo-server-sso-argocd.md @@ -54,6 +54,7 @@ data: dex.config: | # Setting staticClients allows Argo Workflows to use Argo CD's Dex installation for authentication staticClients: + # This is the OIDC client ID in plaintext - id: argo-workflows-sso name: Argo Workflow redirectURIs: diff --git a/docs/async-pattern.md b/docs/async-pattern.md index ba1759e51e55..24300633a5c2 100644 --- a/docs/async-pattern.md +++ b/docs/async-pattern.md @@ -21,39 +21,44 @@ kind: WorkflowTemplate metadata: name: external-job-template spec: + entrypoint: run-external-job + arguments: + parameters: + - name: "job-cmd" templates: - - name: run-external-job - inputs: - parameters: - - name: "job-cmd" - steps: - - - name: trigger-job - template: trigger-job - arguments: - parameters: - - name: "job-cmd" - value: "{{inputs.parameters.job-cmd}}" - - - name: wait-completion - template: wait-completion - arguments: - parameters: - - name: uuid - value: "{{steps.trigger-job.outputs.result}}" - - - name: trigger-job - inputs: - parameters: - - name: "job-cmd" - value: "{{inputs.parameters.job-cmd}}" - image: appropriate/curl:latest - command: ["/bin/sh", "-c"] - args: ["{{inputs.parameters.cmd}}"] - - - name: wait-completion - inputs: - parameters: - - name: uuid - suspend: {} + - name: run-external-job + inputs: + parameters: + - name: "job-cmd" + value: "{{workflow.parameters.job-cmd}}" + steps: + - - name: trigger-job + template: trigger-job + arguments: + parameters: + - name: "job-cmd" + value: 
"{{inputs.parameters.job-cmd}}" + - - name: wait-completion + template: wait-completion + arguments: + parameters: + - name: uuid + value: "{{steps.trigger-job.outputs.result}}" + + - name: trigger-job + inputs: + parameters: + - name: "job-cmd" + container: + image: appropriate/curl:latest + command: [ "/bin/sh", "-c" ] + args: [ "{{inputs.parameters.job-cmd}}" ] + + - name: wait-completion + inputs: + parameters: + - name: uuid + suspend: { } ``` In this case the ```job-cmd``` parameter can be a command that makes an HTTP call via curl to an endpoint that returns a job UUID. More sophisticated submission and parsing of submission output could be done with something like a Python script step. @@ -66,12 +71,12 @@ You may need an [access token](access-token.md). curl --request PUT \ --url https://localhost:2746/api/v1/workflows///resume --header 'content-type: application/json' \ - --header "Authorization: Bearer $ARGO_TOKEN" \ + --header "Authorization: $ARGO_TOKEN" \ --data '{ "namespace": "", "name": "", "nodeFieldSelector": "inputs.parameters.uuid.value=" - }' + }' ``` or stop if unsuccessful: @@ -80,13 +85,13 @@ or stop if unsuccessful: curl --request PUT \ --url https://localhost:2746/api/v1/workflows///stop --header 'content-type: application/json' \ - --header "Authorization: Bearer $ARGO_TOKEN" \ + --header "Authorization: $ARGO_TOKEN" \ --data '{ "namespace": "", "name": "", "nodeFieldSelector": "inputs.parameters.uuid.value=", "message": "" - }' + }' ``` ## Retrying failed jobs diff --git a/docs/cli/argo.md b/docs/cli/argo.md index 69af54e3e99f..1ba356e2dce2 100644 --- a/docs/cli/argo.md +++ b/docs/cli/argo.md @@ -7,7 +7,7 @@ argo is the command line interface to Argo You can use the CLI in the following modes: -# Kubernetes API Mode (default) +#### Kubernetes API Mode (default) Requests are sent directly to the Kubernetes API. No Argo Server is needed. Large workflows and the workflow archive are not supported. 
@@ -17,9 +17,9 @@ If you're using instance ID (which is very unlikely), you'll need to set it: ARGO_INSTANCEID=your-instanceid -# Argo Server GRPC Mode +#### Argo Server GRPC Mode -Requests are sent to the Argo Server API via GRPC (using HTTP/2). Large workflows and the workflow archive are supported. Network load-balancers that do not support HTTP/2 are not supported. +Requests are sent to the Argo Server API via GRPC (using HTTP/2). Large workflows and the workflow archive are supported. Network load-balancers that do not support HTTP/2 are not supported. Use if you do not have access to the Kubernetes API (e.g. you're in another cluster), and you're running the Argo Server using a network load-balancer that support HTTP/2. @@ -40,14 +40,14 @@ By default, the CLI uses your KUBECONFIG to determine default for ARGO_TOKEN and KUBECONFIG=/dev/null You will then need to set: - - ARGO_NAMESPACE=argo + + ARGO_NAMESPACE=argo And: - ARGO_TOKEN='Bearer ******' ;# Should always start with "Bearer " or "Basic ". + ARGO_TOKEN='Bearer ******' ;# Should always start with "Bearer " or "Basic ". -# Argo Server HTTP1 Mode +#### Argo Server HTTP1 Mode As per GRPC mode, but uses HTTP. Can be used with ALB that does not support HTTP/2. The command "argo logs --since-time=2020...." will not work (due to time-type). diff --git a/docs/cli/argo_cluster-template_create.md b/docs/cli/argo_cluster-template_create.md index 9672e14a7f06..5841af4a707e 100644 --- a/docs/cli/argo_cluster-template_create.md +++ b/docs/cli/argo_cluster-template_create.md @@ -6,6 +6,20 @@ create a cluster workflow template argo cluster-template create FILE1 FILE2... 
[flags] ``` +### Examples + +``` +# Create a Cluster Workflow Template: + argo cluster-template create FILE1 + +# Create a Cluster Workflow Template and print it as YAML: + argo cluster-template create FILE1 --output yaml + +# Create a Cluster Workflow Template with relaxed validation: + argo cluster-template create FILE1 --strict false + +``` + ### Options ``` diff --git a/docs/cli/argo_cluster-template_list.md b/docs/cli/argo_cluster-template_list.md index 0a9ea4147fb2..9623938fc810 100644 --- a/docs/cli/argo_cluster-template_list.md +++ b/docs/cli/argo_cluster-template_list.md @@ -6,6 +6,20 @@ list cluster workflow templates argo cluster-template list [flags] ``` +### Examples + +``` +# List Cluster Workflow Templates: + argo cluster-template list + +# List Cluster Workflow Templates with additional details such as labels, annotations, and status: + argo cluster-template list --output wide + +# List Cluster Workflow Templates by name only: + argo cluster-template list -o name + +``` + ### Options ``` diff --git a/docs/cli/argo_list.md b/docs/cli/argo_list.md index f9497332fd44..e384c46dccb1 100644 --- a/docs/cli/argo_list.md +++ b/docs/cli/argo_list.md @@ -6,6 +6,38 @@ list workflows argo list [flags] ``` +### Examples + +``` +# List all workflows: + argo list + +# List all workflows from all namespaces: + argo list -A + +# List all running workflows: + argo list --running + +# List all completed workflows: + argo list --completed + + # List workflows created within the last 10m: + argo list --since 10m + +# List workflows that finished more than 2h ago: + argo list --older 2h + +# List workflows with more information (such as parameters): + argo list -o wide + +# List workflows in YAML format: + argo list -o yaml + +# List workflows that have both labels: + argo list -l label1=value1,label2=value2 + +``` + ### Options ``` diff --git a/docs/cli/argo_stop.md b/docs/cli/argo_stop.md index 5dbcc31a6747..3e00d790684f 100644 --- a/docs/cli/argo_stop.md +++ 
b/docs/cli/argo_stop.md @@ -34,7 +34,7 @@ argo stop WORKFLOW WORKFLOW2... [flags] ### Options ``` - --dry-run If true, only stop the workflows that would be stopped, without stopping them. + --dry-run If true, only print the workflows that would be stopped, without stopping them. --field-selector string Selector (field query) to filter on, supports '=', '==', and '!='.(e.g. --field-selector key1=value1,key2=value2). The server only supports a limited number of field queries per type. -h, --help help for stop --message string Message to add to previously running nodes diff --git a/docs/cli/argo_submit.md b/docs/cli/argo_submit.md index 3c1aa1cdbacd..583fadb5164b 100644 --- a/docs/cli/argo_submit.md +++ b/docs/cli/argo_submit.md @@ -29,6 +29,10 @@ argo submit [FILE... | --from `kind/name] [flags] argo submit --from cronwf/my-cron-wf +# Submit multiple workflows from stdin: + + cat my-wf.yaml | argo submit - + ``` ### Options diff --git a/docs/cluster-workflow-templates.md b/docs/cluster-workflow-templates.md index 4d133bf502b1..d2d9e0d1570f 100644 --- a/docs/cluster-workflow-templates.md +++ b/docs/cluster-workflow-templates.md @@ -140,7 +140,7 @@ argo submit https://raw.githubusercontent.com/argoproj/argo-workflows/main/examp Then submit a `ClusterWorkflowTemplate` as a `Workflow`: ```bash -argo submit --from clusterworkflowtemplate/workflow-template-submittable +argo submit --from clusterworkflowtemplate/cluster-workflow-template-submittable ``` ### `kubectl` diff --git a/docs/conditional-artifacts-parameters.md b/docs/conditional-artifacts-parameters.md index 1e35295aea65..e7c2403f90a2 100644 --- a/docs/conditional-artifacts-parameters.md +++ b/docs/conditional-artifacts-parameters.md @@ -2,10 +2,8 @@ > v3.1 and after -The Conditional Artifacts and Parameters feature enables to assign the Step/DAG level artifacts or parameters based on -expression. 
This introduces a new field `fromExpression: ...` under Step/DAG level output artifact and `expression: ...` -under step/DAG level output parameter. Both use the -[expr](https://github.com/antonmedv/expr/blob/master/docs/Language-Definition.md) syntax. +You can set Step/DAG level artifacts or parameters based on an [expression](variables.md#expression). +Use `fromExpression` under a Step/DAG level output artifact and `expression` under a Step/DAG level output parameter. ## Conditional Artifacts @@ -51,22 +49,6 @@ under step/DAG level output parameter. Both use the expression: "steps['flip-coin'].outputs.result == 'heads' ? steps.heads.outputs.result : steps.tails.outputs.result" ``` -## Built-In Functions - -Convenient functions added to support more use cases: - -1. `asInt` - convert the string to integer (e.g: `asInt('1')`) -2. `asFloat` - convert the string to Float (e.g: `asFloat('1.23')`) -3. `string` - convert the int/float to string (e.g: `string(1)`) -4. `jsonpath` - Extract the element from JSON using JSON Path ( - e.g: `jsonpath('{"employee":{"name":"sonoo","salary":56000,"married":true}}", "$.employee.name" )` ) -5. [Sprig](http://masterminds.github.io/sprig/) - Support all `sprig` functions - -!!! NOTE - Expressions will decode the `-` as operator if template name has `-`, it will fail the expression. So here solution - for template name which has `-` in its name. 
`step['one-two-three'].outputs.artifacts` - * [Steps parameter example](https://raw.githubusercontent.com/argoproj/argo-workflows/main/examples/conditional-parameters.yaml) * [DAG parameter example](https://raw.githubusercontent.com/argoproj/argo-workflows/main/examples/dag-conditional-parameters.yaml) * [Advanced example: fibonacci Sequence](https://raw.githubusercontent.com/argoproj/argo-workflows/main/examples/fibonacci-seq-conditional-param.yaml) - diff --git a/docs/configure-archive-logs.md b/docs/configure-archive-logs.md index 023f66366c4b..44f8be295816 100644 --- a/docs/configure-archive-logs.md +++ b/docs/configure-archive-logs.md @@ -1,6 +1,8 @@ # Configuring Archive Logs -⚠️ We do not recommend you rely on Argo Workflows to archive logs. Instead, use a conventional Kubernetes logging facility. +!!! Warning "Not recommended" + We do not recommend relying on Argo to archive logs as it is naive and not purpose-built for indexing, searching, and storing logs. + This feature is provided as a convenience to quickly view logs of garbage collected Pods in the Argo UI, but we [recommend](#suggested-alternatives) you integrate a dedicated, Kubernetes-aware logging facility. To enable automatic pipeline logging, you need to configure `archiveLogs` at workflow-controller config-map, workflow spec, or template level. You also need to configure [Artifact Repository](configure-artifact-repository.md) to define where this logging artifact is stored. @@ -59,3 +61,19 @@ spec: archiveLocation: archiveLogs: true ``` + +## Suggested alternatives + +Argo's log storage is naive and will not reach feature parity with purpose-built facilities optimized for indexing, searching, and storing logs. 
Some open-source tools include: + +* [`fluentd`](https://github.com/fluent/fluentd) for collection +* [ELK](https://www.elastic.co/elastic-stack/) as storage, querying and a UI +* [`promtail`](https://grafana.com/docs/loki/latest/send-data/promtail/) for collection +* [`loki`](https://grafana.com/docs/loki/latest/) for storage and querying +* [`grafana`](https://grafana.com/docs/grafana/latest/) for a UI + +You can add [links](links.md) to connect from the Argo UI to your logging facility's UI. See examples in the [`workflow-controller-configmap.yaml`](workflow-controller-configmap.yaml). + +* Link `scope: workflow` to the logs of a Workflow +* Link `scope: pod-logs` to the logs of a specific Pod of a Workflow +* Parametrize the link with `${metadata.name}`, `${metadata.namespace}`, `${metadata.labels}`, and other available metadata diff --git a/docs/container-set-template.md b/docs/container-set-template.md index 697a3b09ee7e..2108fa3d08b9 100644 --- a/docs/container-set-template.md +++ b/docs/container-set-template.md @@ -2,7 +2,7 @@ > v3.1 and after -A container set templates is similar to a normal container or script template, but allows you to specify multiple +A container set template is similar to a normal container or script template, but allows you to specify multiple containers to run within a single pod. Because you have multiple containers within a pod, they will be scheduled on the same host. You can use cheap and fast @@ -66,7 +66,7 @@ All container set templates that have artifacts must/should have a container nam If you want to use base-layer artifacts, `main` must be last to finish, so it must be the root node in the graph. -That is may not be practical. +That may not be practical. Instead, have a workspace volume and make sure all artifacts paths are on that volume. @@ -116,3 +116,73 @@ Example B: Lopsided requests, e.g. `a -> b` where `a` is cheap and `b` is expens Can you see the problem here? 
`a` only has small requests, but the container set will use the total of all requests. So it's as if you're using all that GPU for 10h. This will be expensive. Solution: do not use container set when you have lopsided requests. + +## Inner `retryStrategy` usage + +> v3.3 and after + +You can set an inner `retryStrategy` to apply to all containers of a container set, including the `duration` between each retry and the total number of `retries`. + +See an example below: + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + name: containerset-with-retrystrategy + annotations: + workflows.argoproj.io/description: | + This workflow creates a container set with a retryStrategy. +spec: + entrypoint: containerset-retrystrategy-example + templates: + - name: containerset-retrystrategy-example + containerSet: + retryStrategy: + retries: "10" # if fails, retry at most ten times + duration: 30s # retry for at most 30s + containers: + # this container completes successfully, so it won't be retried. + - name: success + image: python:alpine3.6 + command: + - python + - -c + args: + - | + print("hi") + # if fails, it will retry at most ten times. + - name: fail-retry + image: python:alpine3.6 + command: ["python", -c] + # fail with a 66% probability + args: ["import random; import sys; exit_code = random.choice([0, 1, 1]); sys.exit(exit_code)"] +``` + + + +!!! Note "Template-level `retryStrategy` vs Container Set `retryStrategy`" + `containerSet.retryStrategy` works differently from [template-level retries](retries.md): + + 1. Your `command` will be re-ran by the Executor inside the same container if it fails. + + - As no new containers are created, the nodes in the UI remain the same, and the retried logs are appended to original container's logs. 
For example, your container logs may look like: + ```text + time="2024-03-29T06:40:25 UTC" level=info msg="capturing logs" argo=true + intentional failure + time="2024-03-29T06:40:25 UTC" level=debug msg="ignore signal child exited" argo=true + time="2024-03-29T06:40:26 UTC" level=info msg="capturing logs" argo=true + time="2024-03-29T06:40:26 UTC" level=debug msg="ignore signal urgent I/O condition" argo=true + intentional failure + time="2024-03-29T06:40:26 UTC" level=debug msg="ignore signal child exited" argo=true + time="2024-03-29T06:40:26 UTC" level=debug msg="forwarding signal terminated" argo=true + time="2024-03-29T06:40:27 UTC" level=info msg="sub-process exited" argo=true error="" + time="2024-03-29T06:40:27 UTC" level=info msg="not saving outputs - not main container" argo=true + Error: exit status 1 + ``` + + 1. If a container's `command` cannot be located, it will not be retried. + + - As it will fail each time, the retry logic is short-circuited. + + diff --git a/docs/debug-pause.md b/docs/debug-pause.md index d62ef29c5261..d5b565a4e978 100644 --- a/docs/debug-pause.md +++ b/docs/debug-pause.md @@ -68,5 +68,5 @@ In order to have access to the persistence volume used by the workflow step, [` The ephemeral container can be used to perform debugging operations. When debugging has been completed, create the marker file to allow the workflow step to continue. When using process name space sharing container file systems are visible to other containers in the pod through the `/proc/$pid/root` link. ```bash -touch /proc/1/root/run/argo/ctr/main/after +touch /proc/1/root/var/run/argo/ctr/main/after ``` diff --git a/docs/doc-changes.md b/docs/doc-changes.md index 017ca30b8121..580b9de98b74 100644 --- a/docs/doc-changes.md +++ b/docs/doc-changes.md @@ -9,6 +9,7 @@ General guidelines: * Explain when you would want to use a feature. * Provide working examples. * Format code using back-ticks to avoid it being reported as a spelling error. 
+* Prefer 1 sentence per line of markdown * Follow the recommendations in the official [Kubernetes Documentation Style Guide](https://kubernetes.io/docs/contribute/style/style-guide/). * Particularly useful sections include [Content best practices](https://kubernetes.io/docs/contribute/style/style-guide/#content-best-practices) and [Patterns to avoid](https://kubernetes.io/docs/contribute/style/style-guide/#patterns-to-avoid). * **Note**: Argo does not use the same tooling, so the sections on "shortcodes" and "EditorConfig" are not relevant. diff --git a/docs/executor_swagger.md b/docs/executor_swagger.md index e8100fb5af85..9cd0f0aadc3f 100644 --- a/docs/executor_swagger.md +++ b/docs/executor_swagger.md @@ -1026,10 +1026,13 @@ referred to by services. ### ContainerSetRetryStrategy +> ContainerSetRetryStrategy provides controls on how to retry a container set + + **Properties** | Name | Type | Go type | Required | Default | Description | Example | diff --git a/docs/faq.md b/docs/faq.md index f46ee59c52c6..5cd16908399e 100644 --- a/docs/faq.md +++ b/docs/faq.md @@ -5,7 +5,7 @@ You may not have configured Argo Server authentication correctly. If you want SSO, try running with `--auth-mode=sso`. -If you're using `--auth-mode=client`, make sure you have `Bearer` in front of the token, as mentioned in [Access Token](access-token.md#token-creation). +If you're using `--auth-mode=client`, make sure you have `Bearer` in front of the ServiceAccount Secret, as mentioned in [Access Token](access-token.md#token-creation). [Learn more about the Argo Server set-up](argo-server.md) @@ -21,11 +21,11 @@ Is there an RBAC error? [Learn more about workflow RBAC](workflow-rbac.md) -## Return "unknown (get pods)" error +## `cannot patch resource "pods" in API group ""` error You're probably getting a permission denied error because your RBAC is not configured. 
-[Learn more about workflow RBAC](workflow-rbac.md) and [even more details](https://blog.argoproj.io/demystifying-argo-workflowss-kubernetes-rbac-7a1406d446fc) +[Learn more about workflow RBAC](workflow-rbac.md) ## There is an error about `/var/run/docker.sock` diff --git a/docs/fields.md b/docs/fields.md index 79cecc220ef1..9f1fc5b16030 100644 --- a/docs/fields.md +++ b/docs/fields.md @@ -339,6 +339,8 @@ Workflow is the definition of a workflow resource - [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/webhdfs-input-output-artifacts.yaml) +- [`withsequence-nested-result.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/withsequence-nested-result.yaml) + - [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/work-avoidance.yaml) - [`workflow-of-workflows.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-of-workflows.yaml) @@ -759,6 +761,8 @@ WorkflowSpec is the specification of a Workflow. 
- [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/webhdfs-input-output-artifacts.yaml) +- [`withsequence-nested-result.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/withsequence-nested-result.yaml) + - [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/work-avoidance.yaml) - [`event-consumer-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-event-binding/event-consumer-workflowtemplate.yaml) @@ -1194,6 +1198,8 @@ CronWorkflowSpec is the specification of a CronWorkflow - [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/webhdfs-input-output-artifacts.yaml) +- [`withsequence-nested-result.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/withsequence-nested-result.yaml) + - [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/work-avoidance.yaml) - [`event-consumer-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-event-binding/event-consumer-workflowtemplate.yaml) @@ -2387,7 +2393,7 @@ _No description available_ | Field Name | Field Type | Description | |:----------:|:----------:|---------------| |`containers`|`Array<`[`ContainerNode`](#containernode)`>`|_No description available_| -|`retryStrategy`|[`ContainerSetRetryStrategy`](#containersetretrystrategy)|RetryStrategy describes how to retry a container nodes in the container set if it fails. Nbr of retries(default 0) and sleep duration between retries(default 0s, instant retry) can be set.| +|`retryStrategy`|[`ContainerSetRetryStrategy`](#containersetretrystrategy)|RetryStrategy describes how to retry container nodes if the container set fails. 
Note that this works differently from the template-level `retryStrategy` as it is a process-level retry that does not create new Pods or containers.| |`volumeMounts`|`Array<`[`VolumeMount`](#volumemount)`>`|_No description available_| ## DAGTemplate @@ -2882,6 +2888,8 @@ ScriptTemplate is a template subtype to enable scripting through code steps - [`scripts-python.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-python.yaml) +- [`withsequence-nested-result.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/withsequence-nested-result.yaml) + - [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/work-avoidance.yaml) @@ -3073,6 +3081,8 @@ WorkflowStep is a reference to a template to execute in a series of step - [`volumes-pvc.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-pvc.yaml) +- [`withsequence-nested-result.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/withsequence-nested-result.yaml) + - [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/work-avoidance.yaml) - [`event-consumer-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-event-binding/event-consumer-workflowtemplate.yaml) @@ -3734,7 +3744,7 @@ _No description available_ ## ContainerSetRetryStrategy -_No description available_ +ContainerSetRetryStrategy provides controls on how to retry a container set
Examples with this field (click to open) @@ -3766,7 +3776,7 @@ _No description available_ | Field Name | Field Type | Description | |:----------:|:----------:|---------------| |`duration`|`string`|Duration is the time between each retry, examples values are "300ms", "1s" or "5m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".| -|`retries`|[`IntOrString`](#intorstring)|Nbr of retries| +|`retries`|[`IntOrString`](#intorstring)|Retries is the maximum number of retry attempts for each container. It does not include the first, original attempt; the maximum number of total attempts will be `retries + 1`.| ## DAGTask @@ -3923,6 +3933,8 @@ DataSource sources external data into a data template - [`scripts-python.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-python.yaml) +- [`withsequence-nested-result.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/withsequence-nested-result.yaml) + - [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/work-avoidance.yaml)
@@ -4067,6 +4079,8 @@ Sequence expands a workflow step into numeric range - [`loops-sequence.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-sequence.yaml) +- [`withsequence-nested-result.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/withsequence-nested-result.yaml) + - [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/work-avoidance.yaml) @@ -4839,6 +4853,8 @@ ObjectMeta is metadata that all persisted resources must have, which includes al - [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/webhdfs-input-output-artifacts.yaml) +- [`withsequence-nested-result.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/withsequence-nested-result.yaml) + - [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/work-avoidance.yaml) - [`event-consumer-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-event-binding/event-consumer-workflowtemplate.yaml) @@ -6164,6 +6180,8 @@ PersistentVolumeClaimSpec describes the common attributes of storage devices and - [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/webhdfs-input-output-artifacts.yaml) +- [`withsequence-nested-result.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/withsequence-nested-result.yaml) + - [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/work-avoidance.yaml) - [`event-consumer-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-event-binding/event-consumer-workflowtemplate.yaml) diff --git a/docs/installation.md b/docs/installation.md index 17e52de3bbb4..a4dba6bc0593 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -37,6 +37,7 @@ Determine your base installation option. 
Review the following: +* [Workflow RBAC](workflow-rbac.md) * [Security](security.md). * [Scaling](scaling.md) and [running at massive scale](running-at-massive-scale.md). * [High-availability](high-availability.md) diff --git a/docs/managed-namespace.md b/docs/managed-namespace.md index 9fa84374dd54..966c8943f000 100644 --- a/docs/managed-namespace.md +++ b/docs/managed-namespace.md @@ -2,13 +2,12 @@ > v2.5 and after -You can install Argo in either cluster scoped or namespace scope configurations. -This dictates if you must set-up cluster roles or normal roles. +You can install Argo in either namespace scoped or cluster scoped configurations. +The main difference is whether you install Roles or ClusterRoles, respectively. -In namespace scope configuration, you must run both the Workflow Controller and -Argo Server using `--namespaced`. If you would like to have the workflows running in a separate -namespace, add `--managed-namespace` as well. (In cluster scope installation, don't include `--namespaced` -or `--managed-namespace`.) +In namespace scoped configuration, you must run both the Workflow Controller and Argo Server using `--namespaced`. +If you want to run workflows in a separate namespace, add `--managed-namespace` as well. +(In cluster scoped configuration, _don't_ include `--namespaced` or `--managed-namespace`.) For example: @@ -23,7 +22,7 @@ For example: - default ``` -Please mind that both cluster scoped and namespace scoped configurations require "admin" role because some custom resource (CRD) must be created (and CRD is always a cluster level object) +Please note that both cluster scoped and namespace scoped configurations require "admin" roles to install because Argo's Custom Resource Definitions (CRDs) must be created (CRDs are cluster scoped objects). !!! 
Info "Example Use Case" You can use a managed namespace install if you want some users or services to run Workflows without granting them privileges in the namespace where Argo Workflows is installed. diff --git a/docs/memoization.md b/docs/memoization.md index cdf2174cc746..8c7f64959efd 100644 --- a/docs/memoization.md +++ b/docs/memoization.md @@ -57,4 +57,4 @@ spec: * Reduce the size of the output parameters for the nodes that are being memoized. * Split your cache into different memoization keys and cache names so that each cache entry is small. 1. My step isn't getting memoized, why not? - Ensure that you have specified at least one output on the step. + If you are running workflows <3.5 ensure that you have specified at least one output on the step. diff --git a/docs/metrics.md b/docs/metrics.md index be81f5be766a..e3ff65e13796 100644 --- a/docs/metrics.md +++ b/docs/metrics.md @@ -90,7 +90,7 @@ Number of API requests sent to the Kubernetes API. #### `argo_workflows_operation_duration_seconds` -A histogram of durations of operations. +A histogram of durations of operations. An operation is a single workflow reconciliation loop within the workflow-controller. It's the time for the controller to process a single workflow after it has been read from the cluster and is a measure of the performance of the controller affected by the complexity of the workflow. #### `argo_workflows_pods_count` @@ -344,7 +344,7 @@ metricsConfig: | # Path is the path where metrics are emitted. Must start with a "/". Default is "/metrics" path: /metrics - + # Port is the port where metrics are emitted. Default is "9090" port: 8080 diff --git a/docs/quick-start.md b/docs/quick-start.md index 2901ca6b18ba..858ad3b3bd7c 100644 --- a/docs/quick-start.md +++ b/docs/quick-start.md @@ -1,110 +1,89 @@ # Quick Start -To see how Argo Workflows work, you can install it and run examples of simple workflows. +To try out Argo Workflows, you can install it and run example workflows. 
-Before you start you need a Kubernetes cluster and `kubectl` set up to be able to access that cluster. For the purposes of getting up and running, a local cluster is fine. You could consider the following local Kubernetes cluster options: +Alternatively, if you don't want to set up a Kubernetes cluster, try the [Killercoda course](training.md#hands-on). + +## Prerequisites + +Before installing Argo, you need a Kubernetes cluster and `kubectl` configured to access it. +For quick testing, you can use a local cluster with: * [minikube](https://minikube.sigs.k8s.io/docs/) * [kind](https://kind.sigs.k8s.io/) * [k3s](https://k3s.io/) or [k3d](https://k3d.io/) * [Docker Desktop](https://www.docker.com/products/docker-desktop/) -Alternatively, if you want to try out Argo Workflows and don't want to set up a Kubernetes cluster, try the [Killercoda course](training.md#hands-on). - !!! Warning "Development vs. Production" - These instructions are intended to help you get started quickly. They are not suitable in production. For production installs, please refer to [the installation documentation](installation.md). + These instructions are intended to help you get started quickly. They are not suitable for production. + For production installs, please refer to [the installation documentation](installation.md). ## Install Argo Workflows -To install Argo Workflows, navigate to the [releases page](https://github.com/argoproj/argo-workflows/releases/latest) and find the release you wish to use (the latest full release is preferred). - -Scroll down to the `Controller and Server` section and execute the `kubectl` commands. 
- -Below is an example of the install commands, ensure that you update the command to install the correct version number: - -```yaml -kubectl create namespace argo -kubectl apply -n argo -f https://github.com/argoproj/argo-workflows/releases/download/v<>/install.yaml -``` - -### Patch argo-server authentication - -The argo-server (and thus the UI) defaults to client authentication, which requires clients to provide their Kubernetes bearer token in order to authenticate. For more information, refer to the [Argo Server Auth Mode documentation](argo-server-auth-mode.md). We will switch the authentication mode to `server` so that we can bypass the UI login for now: +First, specify the version you want to install in an environment variable. +Modify the command below: ```bash -kubectl patch deployment \ - argo-server \ - --namespace argo \ - --type='json' \ - -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/args", "value": [ - "server", - "--auth-mode=server" -]}]' - +ARGO_WORKFLOWS_VERSION="vX.Y.Z" ``` -### Port-forward the UI - -Open a port-forward so you can access the UI: +Then, copy the commands below to apply the quick-start manifest: ```bash -kubectl -n argo port-forward deployment/argo-server 2746:2746 +kubectl create namespace argo +kubectl apply -n argo -f "https://github.com/argoproj/argo-workflows/releases/download/${ARGO_WORKFLOWS_VERSION}/quick-start-minimal.yaml" ``` -This will serve the UI on . Due to the self-signed certificate, you will receive a TLS error which you will need to manually approve. - -> Pay close attention to the URI. It uses `https` and not `http`. Navigating to `http://localhost:2746` result in server-side error that breaks the port-forwarding. - ## Install the Argo Workflows CLI You can more easily interact with Argo Workflows with the [Argo CLI](walk-through/argo-cli.md). 
-## Submitting an example workflow +## Submit an example workflow -### Submit an example workflow (CLI) +### Submit via the CLI ```bash argo submit -n argo --watch https://raw.githubusercontent.com/argoproj/argo-workflows/main/examples/hello-world.yaml ``` -The `--watch` flag used above will allow you to observe the workflow as it runs and the status of whether it succeeds. -When the workflow completes, the watch on the workflow will stop. +The `--watch` flag watches the workflow as it runs and reports whether it succeeds or not. +When the workflow completes, the watch stops. -You can list all the Workflows you have submitted by running the command below: +You can list all submitted Workflows by running the command below: ```bash argo list -n argo ``` -You will notice the Workflow name has a `hello-world-` prefix followed by random characters. These characters are used -to give Workflows unique names to help identify specific runs of a Workflow. If you submitted this Workflow again, -the next Workflow run would have a different name. +The Workflow name has a `hello-world-` prefix followed by random characters. +These characters give Workflows unique names to help identify specific runs of a Workflow. +If you submit this Workflow again, the next run will have different characters. -Using the `argo get` command, you can always review details of a Workflow run. The output for the command below will -be the same as the information shown as when you submitted the Workflow: +You can review the details of a Workflow run using the `argo get` command. +The output for the command below will be the same as the information shown when you submitted the Workflow: ```bash argo get -n argo @latest ``` -The `@latest` argument to the CLI is a short cut to view the latest Workflow run that was executed. +The `@latest` argument is a shortcut to view the latest Workflow run. 
-You can also observe the logs of the Workflow run by running the following: +You can observe the logs of the Workflow run with the following command: ```bash argo logs -n argo @latest ``` -### Submit an example workflow (GUI) - -* Open a port-forward so you can access the UI: - -```bash -kubectl -n argo port-forward deployment/argo-server 2746:2746 -``` +### Submit via the UI -* Navigate your browser to . +1. Forward the Server's port to access the UI: -* Click `+ Submit New Workflow` and then `Edit using full workflow options` + ```bash + kubectl -n argo port-forward service/argo-server 2746:2746 + ``` -* You can find an example workflow already in the text field. Press `+ Create` to start the workflow. +1. Navigate your browser to . + * **Note**: The URL uses `https` and not `http`. Navigating to `http` will result in a server-side error. + * Due to the self-signed certificate, you will receive a TLS error which you will need to manually approve. +1. Click `+ Submit New Workflow` and then `Edit using full workflow options` +1. You can find an example workflow already in the text field. Press `+ Create` to start the workflow. diff --git a/docs/releases.md b/docs/releases.md index b95024a1c722..6aaa6135df7f 100644 --- a/docs/releases.md +++ b/docs/releases.md @@ -28,8 +28,11 @@ Both the `argo-server` and `argocli` should be the same version as the controlle ## Release Cycle -New minor versions are released roughly every 6 months. Release candidates for each major release are typically available -for 4-6 weeks before the release becomes generally available. +New minor versions are released roughly every 6 months. + +Release candidates (RCs) for major and minor releases are typically available for 4-6 weeks before the release becomes generally available (GA). Features may be shipped in subsequent release candidates. 
+ +When features are shipped in a new release candidate, the most recent release candidate will be available for at least 2 weeks to ensure it is tested sufficiently before it is pushed to GA. If bugs are found with a feature and are not resolved within the 2 week period, the features will be rolled back so as to be saved for the next major/minor release timeline, and a new release candidate will be cut for testing before pushing to GA. Otherwise, we typically release every two weeks: diff --git a/docs/releasing.md b/docs/releasing.md index 215518a5d9fb..8293f62226f2 100644 --- a/docs/releasing.md +++ b/docs/releasing.md @@ -3,6 +3,8 @@ ## Cherry-Picking Fixes ✋ Before you start, make sure you have created a release branch (e.g. `release-3.3`) and it's passing CI. +Please make sure that all patch releases (e.g. `v3.3.5`) should be released from their associated minor release branches (e.g. `release-3.3`) +to work well with our versioned website. Then get a list of commits you may want to cherry-pick: @@ -60,6 +62,11 @@ you can approve it, enable auto-merge, and then run the following to force trigg git branch -D create-pull-request/changelog git fetch upstream git checkout --track upstream/create-pull-request/changelog -git commit -s --allow-empty -m "docs: Force trigger CI" +git commit -s --allow-empty -m "chore: Force trigger CI" git push upstream create-pull-request/changelog ``` + +## Announce on Slack + +Once the changelog updates have been merged, you should announce on our Slack channels, [`#argo-workflows`](https://cloud-native.slack.com/archives/C01QW9QSSSK) and [`#argo-announcements`](https://cloud-native.slack.com/archives/C02165G1L48). +See [previous](https://cloud-native.slack.com/archives/C02165G1L48/p1701112932434469) [announcements](https://cloud-native.slack.com/archives/C01QW9QSSSK/p1701112957127489) as examples of what to write in the patch announcement. 
diff --git a/docs/running-nix.md b/docs/running-nix.md index 7d1a6c955060..b51deaf6ade5 100644 --- a/docs/running-nix.md +++ b/docs/running-nix.md @@ -31,7 +31,7 @@ and replace the existing hash value. The almost exact same principles apply here, the only difference being you must change the `vendorHash` and the `sha256` fields. The `vendorHash` is a hash of the vendored dependencies while the `sha256` is for the sources fetched from the `fetchFromGithub` call. -### Why am I getting a vendorSha256 mismatch ? +### Why am I getting a `vendorSha256` mismatch ? Unfortunately, dependabot is not capable of upgrading flakes automatically, when the go modules are automatically upgraded the hash of the vendor dependencies changes but this change isn't automatically reflected in the nix file. The `vendorSha256` field that needs to diff --git a/docs/service-account-secrets.md b/docs/service-account-secrets.md index 64ed36ed1d3d..5b52a9ed85ba 100644 --- a/docs/service-account-secrets.md +++ b/docs/service-account-secrets.md @@ -1,16 +1,15 @@ -# Kubernetes Secrets +# Service Account Secrets As of Kubernetes v1.24, secrets are no longer automatically created for service accounts. -You must create a secret -manually: [Find out how to create these yourself manually](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#manually-create-a-service-account-api-token) -. +You must [create a secret manually](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#manually-create-a-long-lived-api-token-for-a-serviceaccount). -You must make the secret discoverable. You have two options: +You must also make the secret discoverable. +You have two options: ## Option 1 - Discovery By Name -Name your secret `${serviceAccountName}.service-account-token`. 
+Name your secret `${serviceAccountName}.service-account-token`: ```yaml apiVersion: v1 @@ -22,7 +21,7 @@ metadata: type: kubernetes.io/service-account-token ``` -This option is simpler than option 2, as you can combine creating the secret with making it discoverable by name. +This option is simpler than option 2, as you can create the secret and make it discoverable by name at the same time. ## Option 2 - Discovery By Annotation diff --git a/docs/training.md b/docs/training.md index b80c5f3dd7e8..f9d4a0400da2 100644 --- a/docs/training.md +++ b/docs/training.md @@ -8,7 +8,7 @@ We also have a YouTube playlist of videos that includes workshops you can follow ## Hands-On -We've created [a Killercoda course featuring beginner and intermediate lessons](https://killercoda.com/pipekit/course/argo-workflows/). These allow to you try out Argo Workflows in your web browser without needing to install anything on your computer. Each lesson starts up a Kubernetes cluster that you can access via a web browser. +We've created [a Killercoda course featuring beginner and intermediate lessons](https://killercoda.com/argoproj/course/argo-workflows/). These allow to you try out Argo Workflows in your web browser without needing to install anything on your computer. Each lesson starts up a Kubernetes cluster that you can access via a web browser. ## Additional resources diff --git a/docs/variables.md b/docs/variables.md index 4da721c412cd..b2e3db0a25fe 100644 --- a/docs/variables.md +++ b/docs/variables.md @@ -265,7 +265,7 @@ For `Template`-level metrics: | `workflow.creationTimestamp.` | Creation time-stamp formatted with a [`strftime`](http://strftime.org) format character. | | `workflow.creationTimestamp.RFC3339` | Creation time-stamp formatted with in RFC 3339. 
| | `workflow.priority` | Workflow priority | -| `workflow.duration` | Workflow duration estimate, may differ from actual duration by a couple of seconds | +| `workflow.duration` | Workflow duration estimate in seconds, may differ from actual duration by a couple of seconds | | `workflow.scheduledTime` | Scheduled runtime formatted in RFC 3339 (only available for `CronWorkflow`) | ### Exit Handler diff --git a/docs/walk-through/dag.md b/docs/walk-through/dag.md index ca373fffeeaf..d5145129c8c6 100644 --- a/docs/walk-through/dag.md +++ b/docs/walk-through/dag.md @@ -1,8 +1,11 @@ # DAG -As an alternative to specifying sequences of steps, you can define the workflow as a directed-acyclic graph (DAG) by specifying the dependencies of each task. This can be simpler to maintain for complex workflows and allows for maximum parallelism when running tasks. +As an alternative to specifying sequences of [steps](steps.md), you can define a workflow as a directed-acyclic graph (DAG) by specifying the dependencies of each task. +DAGs can be simpler to maintain for complex workflows and allow for maximum parallelism when running tasks. -In the following workflow, step `A` runs first, as it has no dependencies. Once `A` has finished, steps `B` and `C` run in parallel. Finally, once `B` and `C` have completed, step `D` can run. +In the following workflow, step `A` runs first, as it has no dependencies. +Once `A` has finished, steps `B` and `C` run in parallel. +Finally, once `B` and `C` have completed, step `D` runs. ```yaml apiVersion: argoproj.io/v1alpha1 diff --git a/docs/walk-through/loops.md b/docs/walk-through/loops.md index 7835f963907c..7b2093af40e9 100644 --- a/docs/walk-through/loops.md +++ b/docs/walk-through/loops.md @@ -2,16 +2,44 @@ When writing workflows, it is often very useful to be able to iterate over a set of inputs, as this is how argo-workflows can perform loops. -There are two basic ways of running a template multiple times. 
+There are three basic ways of running a template multiple times. +- `withSequence` iterates over a sequence of numbers. - `withItems` takes a list of things to work on. Either - plain, single values, which are then usable in your template as '{{item}}' - a JSON object where each element in the object can be addressed by it's key as '{{item.key}}' - `withParam` takes a JSON array of items, and iterates over it - again the items can be objects like with `withItems`. This is very powerful, as you can generate the JSON in another step in your workflow, so creating a dynamic workflow. +## `withSequence` example + +This runs a template multiple times using `withSequence`. + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: loop-sequence- +spec: + entrypoint: loop-sequence-example + + templates: + - name: loop-sequence-example + steps: + - - name: hello-world-x5 + template: hello-world + withSequence: + count: "5" + + - name: hello-world + container: + image: docker/whalesay:latest + command: [cowsay] + args: ["hello world!"] +``` + ## `withItems` basic example -This example is the simplest. We are taking a basic list of items and iterating over it with `withItems`. It is limited to one varying field for each of the workflow templates instantiated. +This iterates over a list of items with `withItems`, substituting a string for each instantiated template. 
```yaml apiVersion: argoproj.io/v1alpha1 diff --git a/docs/workflow-inputs.md b/docs/workflow-inputs.md index 989d94885570..a3b7cdb1fe19 100644 --- a/docs/workflow-inputs.md +++ b/docs/workflow-inputs.md @@ -34,7 +34,7 @@ Inputs to `DAGTemplate`s use the `arguments` format: dag: tasks: - name: step-A - template: step-template-A + template: step-template-a arguments: parameters: - name: template-param-1 @@ -58,13 +58,13 @@ spec: dag: tasks: - name: step-A - template: step-template-A + template: step-template-a arguments: parameters: - name: template-param-1 value: "{{workflow.parameters.workflow-param-1}}" - - name: step-template-A + - name: step-template-a inputs: parameters: - name: template-param-1 @@ -79,7 +79,7 @@ To run this example: `argo submit -n argo example.yaml -p 'workflow-param-1="abc ### Using Previous Step Outputs As Inputs -In `DAGTemplate`s, it is common to want to take the output of one step and send it as the input to another step. However, there is a difference in how this works for artifacts vs parameters. Suppose our `step-template-A` defines some outputs: +In `DAGTemplate`s, it is common to want to take the output of one step and send it as the input to another step. However, there is a difference in how this works for artifacts vs parameters. 
Suppose our `step-template-a` defines some outputs: ```yaml outputs: @@ -98,14 +98,14 @@ In my `DAGTemplate`, I can send these outputs to another template like this: dag: tasks: - name: step-A - template: step-template-A + template: step-template-a arguments: parameters: - name: template-param-1 value: "{{workflow.parameters.workflow-param-1}}" - name: step-B dependencies: [step-A] - template: step-template-B + template: step-template-b arguments: parameters: - name: template-param-2 diff --git a/docs/workflow-restrictions.md b/docs/workflow-restrictions.md index 7b7c03a5aa7d..75f94a970f66 100644 --- a/docs/workflow-restrictions.md +++ b/docs/workflow-restrictions.md @@ -4,18 +4,19 @@ ## Introduction -As the administrator of the controller, you may want to limit which types of Workflows your users can run. Setting workflow restrictions allows you to ensure that Workflows comply with certain requirements. +As the administrator of the controller, you may want to limit which types of Workflows your users can run. +Workflow Restrictions allow you to set requirements for all Workflows. ## Available Restrictions -* `templateReferencing: Strict`: Only Workflows using `workflowTemplateRef` will be processed. This allows the administrator of the controller to set a "library" of templates that may be run by its operator, limiting arbitrary Workflow execution. -* `templateReferencing: Secure`: Only Workflows using `workflowTemplateRef` will be processed and the controller will enforce that the workflow template that is referenced hasn't changed between operations. If you want to make sure the operator of the Workflow cannot run an arbitrary Workflow, use this option. +* `templateReferencing: Strict`: Only process Workflows using `workflowTemplateRef`. You can use this to require usage of WorkflowTemplates, disallowing arbitrary Workflow execution. 
+* `templateReferencing: Secure`: Same as `Strict` _plus_ enforce that a referenced WorkflowTemplate hasn't changed between operations. If a running Workflow's underlying WorkflowTemplate changes, the Workflow will error out. ## Setting Workflow Restrictions -Workflow Restrictions can be specified by adding them under the `workflowRestrictions` key in the [`workflow-controller-configmap`](./workflow-controller-configmap.yaml). +You can add `workflowRestrictions` in the [`workflow-controller-configmap`](./workflow-controller-configmap.yaml). -For example, to specify that Workflows may only run with `workflowTemplateRef` +For example, to specify that Workflows may only run with `workflowTemplateRef`: ```yaml # This file describes the config settings available in the workflow controller configmap @@ -25,5 +26,5 @@ metadata: name: workflow-controller-configmap data: workflowRestrictions: | - templateReferencing: Secure + templateReferencing: Strict ``` diff --git a/docs/workflow-templates.md b/docs/workflow-templates.md index 30fe124eea22..9865fd892a89 100644 --- a/docs/workflow-templates.md +++ b/docs/workflow-templates.md @@ -344,7 +344,7 @@ argo submit --from workflowtemplate/workflow-template-submittable If you need to submit a `WorkflowTemplate` as a `Workflow` with parameters: ```bash -argo submit --from workflowtemplate/workflow-template-submittable -p param1=value1 +argo submit --from workflowtemplate/workflow-template-submittable -p message=value1 ``` ### `kubectl` diff --git a/examples/withsequence-nested-result.yaml b/examples/withsequence-nested-result.yaml new file mode 100644 index 000000000000..1a6a2f3b4797 --- /dev/null +++ b/examples/withsequence-nested-result.yaml @@ -0,0 +1,43 @@ +# This example shows how to nest withSequence loops in a Workflow. +# A is the first step. A's output determines how many times B is executed. +# B's output then determines how many times C is executed. 
+# A +# / \ +# B1 B2 +# / | \ / | +# C1 C2 C3 C4 C5 +apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: withsequence-nested-result- +spec: + entrypoint: hello-entrypoint + templates: + - name: hello-entrypoint + steps: + - - name: hello-a + template: hello + - - name: hello-b + template: hello-hello + withSequence: + start: "1" + end: "{{steps.hello-a.outputs.result}}" + + - name: hello-hello + steps: + - - name: hello-b + template: hello + - - name: hello-c + template: hello + withSequence: + start: "1" + end: "{{steps.hello-b.outputs.result}}" + + - name: hello + script: + image: python:alpine3.6 + command: [python] + source: | + import random + result = random.randint(0,5) + print(result) diff --git a/go.mod b/go.mod index ffe8b951b11a..6c573fd965eb 100644 --- a/go.mod +++ b/go.mod @@ -58,7 +58,7 @@ require ( golang.org/x/crypto v0.22.0 golang.org/x/exp v0.0.0-20230905200255-921286631fa9 golang.org/x/oauth2 v0.13.0 - golang.org/x/sync v0.5.0 + golang.org/x/sync v0.6.0 golang.org/x/time v0.4.0 google.golang.org/api v0.151.0 google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b @@ -74,6 +74,7 @@ require ( k8s.io/kubectl v0.24.3 k8s.io/utils v0.0.0-20220713171938-56c0de1e6f5e sigs.k8s.io/yaml v1.4.0 + zombiezen.com/go/sqlite v1.2.0 ) require ( @@ -97,7 +98,10 @@ require ( github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect github.com/jcmturner/goidentity/v6 v6.0.1 // indirect github.com/jcmturner/rpc/v2 v2.0.3 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/ncruces/go-strftime v0.1.9 // indirect github.com/pjbgf/sha1cd v0.3.0 // indirect + github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect github.com/sagikazarmark/locafero v0.3.0 // indirect github.com/sagikazarmark/slog-shim v0.1.0 // indirect github.com/segmentio/fasthash v1.0.3 // indirect @@ -106,10 +110,14 @@ require ( github.com/vbatts/tar-split v0.11.3 // indirect go.uber.org/atomic v1.9.0 // indirect 
go.uber.org/multierr v1.9.0 // indirect - golang.org/x/mod v0.12.0 // indirect - golang.org/x/tools v0.13.0 // indirect + golang.org/x/mod v0.14.0 // indirect + golang.org/x/tools v0.17.0 // indirect google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405 // indirect + modernc.org/libc v1.41.0 // indirect + modernc.org/mathutil v1.6.0 // indirect + modernc.org/memory v1.7.2 // indirect + modernc.org/sqlite v1.29.1 // indirect ) require ( diff --git a/go.sum b/go.sum index 4b93d5f78011..37dc8a22af64 100644 --- a/go.sum +++ b/go.sum @@ -805,6 +805,8 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4= +github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nsf/termbox-go v0.0.0-20190121233118-02980233997d/go.mod h1:IuKpRQcYE1Tfu+oAQqaLisqDeXgjyyltCfsaoYN18NQ= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= @@ -882,6 +884,7 @@ github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+Pymzi github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod 
h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= @@ -1155,8 +1158,8 @@ golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= -golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= +golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1188,8 +1191,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= -golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= 
+golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1383,8 +1386,8 @@ golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.10-0.20220218145154-897bd77cd717/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ= -golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= +golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1634,16 +1637,23 @@ modernc.org/internal v1.1.0/go.mod h1:IFhfxUE81NbN8Riy+oHylA3PIYgAvIQ5eMufNzg7/Q modernc.org/lex v1.1.1/go.mod h1:6r8o8DLJkAnOsQaGi8fMoi+Vt6LTbDaCrkUK729D8xM= modernc.org/lexer v1.0.4/go.mod h1:tOajb8S4sdfOYitzCgXDFmbVJ/LE0v1fNJ7annTw36U= modernc.org/lexer v1.0.5/go.mod h1:8npHn3u/NxCEtlC/tRSY77x5+WB3HvHMzMVElQ76ayI= +modernc.org/libc v1.41.0 h1:g9YAc6BkKlgORsUWj+JwqoB1wU3o4DE3bM3yvA3k+Gk= +modernc.org/libc v1.41.0/go.mod 
h1:w0eszPsiXoOnoMJgrXjglgLuDy/bt5RR4y3QzUUeodY= modernc.org/lldb v1.0.4/go.mod h1:AKDI6wUJk7iJS8nRX54St8rq9wUIi3o5YGN3rlejR5o= modernc.org/lldb v1.0.8/go.mod h1:ybOcsZ/RNZo3q8fiGadQFRnD+1Jc+RWGcTPdeilCnUk= modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4= modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo= +modernc.org/memory v1.7.2 h1:Klh90S215mmH8c9gO98QxQFsY+W451E8AnzjoE2ee1E= +modernc.org/memory v1.7.2/go.mod h1:NO4NVCQy0N7ln+T9ngWqOQfi7ley4vpwvARR+Hjw95E= modernc.org/ql v1.4.7/go.mod h1:I900l6z8ckpPy1y9VR0gu4pZ9hl9AhmQla4F8KERzdc= modernc.org/sortutil v1.1.0/go.mod h1:ZyL98OQHJgH9IEfN71VsamvJgrtRX9Dj2gX+vH86L1k= modernc.org/sortutil v1.1.1/go.mod h1:DTj/8BqjEBLZFVPYvEGDfFFg94SsfPxQ70R+SQJ98qA= modernc.org/sortutil v1.2.0/go.mod h1:TKU2s7kJMf1AE84OoiGppNHJwvB753OYfNl2WRb++Ss= +modernc.org/sqlite v1.29.1 h1:19GY2qvWB4VPw0HppFlZCPAbmxFU41r+qjKZQdQ1ryA= +modernc.org/sqlite v1.29.1/go.mod h1:hG41jCYxOAOoO6BRK66AdRlmOcDzXf7qnwlwjUIOqa0= modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0= modernc.org/zappy v1.0.5/go.mod h1:Q5T4ra3/JJNORGK16oe8rRAti7kWtRW4Z93fzin2gBc= @@ -1672,3 +1682,5 @@ sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= +zombiezen.com/go/sqlite v1.2.0 h1:jja0Ubpzpl6bjr/bSaPyvafHO+extoDJJXIaqXT7VOU= +zombiezen.com/go/sqlite v1.2.0/go.mod h1:yRl27//s/9aXU3RWs8uFQwjkTG9gYNGEls6+6SvrclY= 
diff --git a/hack/crdgen.sh b/hack/crdgen.sh index 1c660d2d5eaa..4b32d4785c65 100755 --- a/hack/crdgen.sh +++ b/hack/crdgen.sh @@ -11,13 +11,12 @@ add_header() { controller-gen crd:trivialVersions=true,maxDescLen=0 paths=./pkg/apis/... output:dir=manifests/base/crds/full find manifests/base/crds/full -name 'argoproj.io*.yaml' | while read -r file; do - echo "Patching ${file}" # remove junk fields go run ./hack cleancrd "$file" add_header "$file" # create minimal minimal="manifests/base/crds/minimal/$(basename "$file")" - echo "Creating ${minimal}" + echo "Creating minimal CRD file: ${minimal}" cp "$file" "$minimal" go run ./hack removecrdvalidation "$minimal" done diff --git a/manifests/quick-start-minimal.yaml b/manifests/quick-start-minimal.yaml index 795150d272e9..bd9995fd8e4f 100644 --- a/manifests/quick-start-minimal.yaml +++ b/manifests/quick-start-minimal.yaml @@ -891,11 +891,13 @@ apiVersion: v1 kind: ServiceAccount metadata: name: argo + namespace: argo --- apiVersion: v1 kind: ServiceAccount metadata: name: argo-server + namespace: argo --- apiVersion: v1 kind: ServiceAccount @@ -904,6 +906,27 @@ metadata: --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role +metadata: + name: argo-role + namespace: argo +rules: +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update +- apiGroups: + - "" + resources: + - secrets + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role metadata: annotations: workflows.argoproj.io/description: | @@ -929,126 +952,211 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: - name: argo-role + annotations: + workflows.argoproj.io/description: | + This is the minimum recommended permissions needed if you want to use artifact GC. 
+ name: artifactgc rules: - apiGroups: - - coordination.k8s.io + - argoproj.io resources: - - leases + - workflowartifactgctasks + verbs: + - list + - watch +- apiGroups: + - argoproj.io + resources: + - workflowartifactgctasks/status + verbs: + - patch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + annotations: + workflows.argoproj.io/description: | + Recomended minimum permissions for the `emissary` executor. + name: executor +rules: +- apiGroups: + - argoproj.io + resources: + - workflowtaskresults verbs: - create - - get - - update + - patch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + annotations: + workflows.argoproj.io/description: | + This is an example of the permissions you would need if you wanted to use a resource template to create and manage + other pods. The same pattern would be suitable for other resurces, e.g. a service + name: pod-manager +rules: - apiGroups: - "" resources: - pods - - pods/exec verbs: - create - get - - list - - watch - - update - patch - - delete +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: submit-workflow-template +rules: - apiGroups: - - "" + - argoproj.io resources: - - configmaps + - workfloweventbindings verbs: - - get - - watch - list - apiGroups: - - "" + - argoproj.io resources: - - persistentvolumeclaims - - persistentvolumeclaims/finalizers + - workflowtemplates verbs: - - create - - update - - delete - get - apiGroups: - argoproj.io resources: - workflows - - workflows/finalizers - - workflowtasksets - - workflowtasksets/finalizers - - workflowartifactgctasks verbs: - - get - - list - - watch - - update - - patch - - delete - create +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + annotations: + workflows.argoproj.io/description: | + This is an example of the permissions you would need if you wanted to use a resource template to create and manage + other workflows. 
The same pattern would be suitable for other resurces, e.g. a service + name: workflow-manager +rules: - apiGroups: - argoproj.io resources: - - workflowtemplates - - workflowtemplates/finalizers + - workflows verbs: + - create - get - - list - - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + rbac.authorization.k8s.io/aggregate-to-admin: "true" + name: argo-aggregate-to-admin +rules: - apiGroups: - argoproj.io resources: + - workflows + - workflows/finalizers + - workfloweventbindings + - workfloweventbindings/finalizers + - workflowtemplates + - workflowtemplates/finalizers + - cronworkflows + - cronworkflows/finalizers + - clusterworkflowtemplates + - clusterworkflowtemplates/finalizers + - workflowtasksets + - workflowtasksets/finalizers - workflowtaskresults + - workflowtaskresults/finalizers verbs: - - list - - watch + - create + - delete - deletecollection -- apiGroups: - - "" - resources: - - serviceaccounts - verbs: - get - list + - patch + - update + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + rbac.authorization.k8s.io/aggregate-to-edit: "true" + name: argo-aggregate-to-edit +rules: - apiGroups: - - "" + - argoproj.io resources: - - secrets + - workflows + - workflows/finalizers + - workfloweventbindings + - workfloweventbindings/finalizers + - workflowtemplates + - workflowtemplates/finalizers + - cronworkflows + - cronworkflows/finalizers + - clusterworkflowtemplates + - clusterworkflowtemplates/finalizers + - workflowtaskresults + - workflowtaskresults/finalizers verbs: + - create + - delete + - deletecollection - get + - list + - patch + - update + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + rbac.authorization.k8s.io/aggregate-to-view: "true" + name: argo-aggregate-to-view +rules: - apiGroups: - argoproj.io resources: + - workflows + - workflows/finalizers + - workfloweventbindings + - 
workfloweventbindings/finalizers + - workflowtemplates + - workflowtemplates/finalizers - cronworkflows - cronworkflows/finalizers + - clusterworkflowtemplates + - clusterworkflowtemplates/finalizers + - workflowtaskresults + - workflowtaskresults/finalizers verbs: - get - list - watch - - update - - patch - - delete +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: argo-cluster-role +rules: - apiGroups: - "" resources: - - events - verbs: - - create - - patch -- apiGroups: - - policy - resources: - - poddisruptionbudgets + - pods + - pods/exec verbs: - create - get + - list + - watch + - update + - patch - delete ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: argo-server-role -rules: - apiGroups: - "" resources: @@ -1060,29 +1168,48 @@ rules: - apiGroups: - "" resources: - - secrets + - persistentvolumeclaims + - persistentvolumeclaims/finalizers verbs: - - get - create + - update + - delete + - get - apiGroups: - - "" + - argoproj.io resources: - - pods - - pods/exec - - pods/log + - workflows + - workflows/finalizers + - workflowtasksets + - workflowtasksets/finalizers + - workflowartifactgctasks verbs: - get - list - watch + - update + - patch - delete + - create - apiGroups: - - "" + - argoproj.io resources: - - events + - workflowtemplates + - workflowtemplates/finalizers + - clusterworkflowtemplates + - clusterworkflowtemplates/finalizers verbs: + - get + - list - watch - - create - - patch +- apiGroups: + - argoproj.io + resources: + - workflowtaskresults + verbs: + - list + - watch + - deletecollection - apiGroups: - "" resources: @@ -1090,137 +1217,114 @@ rules: verbs: - get - list - - watch - apiGroups: - argoproj.io resources: - - eventsources - - sensors - - workflows - - workfloweventbindings - - workflowtemplates - cronworkflows - cronworkflows/finalizers verbs: - - create - get - list - watch - update - patch - delete ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role 
-metadata: - annotations: - workflows.argoproj.io/description: | - This is the minimum recommended permissions needed if you want to use artifact GC. - name: artifactgc -rules: -- apiGroups: - - argoproj.io - resources: - - workflowartifactgctasks - verbs: - - list - - watch - apiGroups: - - argoproj.io + - "" resources: - - workflowartifactgctasks/status + - events verbs: + - create - patch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - annotations: - workflows.argoproj.io/description: | - Recomended minimum permissions for the `emissary` executor. - name: executor -rules: - apiGroups: - - argoproj.io + - policy resources: - - workflowtaskresults + - poddisruptionbudgets verbs: - create - - patch + - get + - delete --- apiVersion: rbac.authorization.k8s.io/v1 -kind: Role +kind: ClusterRole metadata: - annotations: - workflows.argoproj.io/description: | - This is an example of the permissions you would need if you wanted to use a resource template to create and manage - other pods. The same pattern would be suitable for other resurces, e.g. 
a service - name: pod-manager + name: argo-clusterworkflowtemplate-role rules: - apiGroups: - - "" + - argoproj.io resources: - - pods + - clusterworkflowtemplates + - clusterworkflowtemplates/finalizers verbs: - - create - get - - patch + - list + - watch --- apiVersion: rbac.authorization.k8s.io/v1 -kind: Role +kind: ClusterRole metadata: - name: submit-workflow-template + name: argo-server-cluster-role rules: - apiGroups: - - argoproj.io + - "" resources: - - workfloweventbindings + - configmaps verbs: + - get + - watch - list - apiGroups: - - argoproj.io + - "" resources: - - workflowtemplates + - secrets verbs: - get + - create - apiGroups: - - argoproj.io + - "" resources: - - workflows + - pods + - pods/exec + - pods/log verbs: - - create ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - annotations: - workflows.argoproj.io/description: | - This is an example of the permissions you would need if you wanted to use a resource template to create and manage - other workflows. The same pattern would be suitable for other resurces, e.g. 
a service - name: workflow-manager -rules: + - get + - list + - watch + - delete - apiGroups: - - argoproj.io + - "" resources: - - workflows + - events verbs: + - watch - create + - patch +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: - get ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: argo-clusterworkflowtemplate-role -rules: + - list + - watch - apiGroups: - argoproj.io resources: + - eventsources + - sensors + - workflows + - workfloweventbindings + - workflowtemplates + - cronworkflows - clusterworkflowtemplates - - clusterworkflowtemplates/finalizers verbs: + - create - get - list - watch + - update + - patch + - delete --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole @@ -1242,20 +1346,9 @@ rules: --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding -metadata: - name: agent-default -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: agent -subjects: -- kind: ServiceAccount - name: default ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding metadata: name: argo-binding + namespace: argo roleRef: apiGroup: rbac.authorization.k8s.io kind: Role @@ -1263,18 +1356,19 @@ roleRef: subjects: - kind: ServiceAccount name: argo + namespace: argo --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: - name: argo-server-binding + name: agent-default roleRef: apiGroup: rbac.authorization.k8s.io kind: Role - name: argo-server-role + name: agent subjects: - kind: ServiceAccount - name: argo-server + name: default --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding @@ -1339,6 +1433,19 @@ subjects: --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + name: argo-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: argo-cluster-role +subjects: +- kind: ServiceAccount + name: argo + namespace: argo +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: 
name: argo-clusterworkflowtemplate-role-binding roleRef: @@ -1353,48 +1460,28 @@ subjects: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: argo-server-clusterworkflowtemplate-role-binding + name: argo-server-binding roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: argo-server-clusterworkflowtemplate-role + name: argo-server-cluster-role subjects: - kind: ServiceAccount name: argo-server namespace: argo --- -apiVersion: v1 -data: - default-v1: | - archiveLogs: true - s3: - bucket: my-bucket - endpoint: minio:9000 - insecure: true - accessKeySecret: - name: my-minio-cred - key: accesskey - secretKeySecret: - name: my-minio-cred - key: secretkey - empty: "" - my-key: | - archiveLogs: true - s3: - bucket: my-bucket - endpoint: minio:9000 - insecure: true - accessKeySecret: - name: my-minio-cred - key: accesskey - secretKeySecret: - name: my-minio-cred - key: secretkey -kind: ConfigMap +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: - annotations: - workflows.argoproj.io/default-artifact-repository: default-v1 - name: artifact-repositories + name: argo-server-clusterworkflowtemplate-role-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: argo-server-clusterworkflowtemplate-role +subjects: +- kind: ServiceAccount + name: argo-server + namespace: argo --- apiVersion: v1 data: @@ -1452,6 +1539,40 @@ data: kind: ConfigMap metadata: name: workflow-controller-configmap + namespace: argo +--- +apiVersion: v1 +data: + default-v1: | + archiveLogs: true + s3: + bucket: my-bucket + endpoint: minio:9000 + insecure: true + accessKeySecret: + name: my-minio-cred + key: accesskey + secretKeySecret: + name: my-minio-cred + key: secretkey + empty: "" + my-key: | + archiveLogs: true + s3: + bucket: my-bucket + endpoint: minio:9000 + insecure: true + accessKeySecret: + name: my-minio-cred + key: accesskey + secretKeySecret: + name: my-minio-cred + key: secretkey +kind: 
ConfigMap +metadata: + annotations: + workflows.argoproj.io/default-artifact-repository: default-v1 + name: artifact-repositories --- apiVersion: v1 kind: Secret @@ -1605,6 +1726,7 @@ apiVersion: v1 kind: Service metadata: name: argo-server + namespace: argo spec: ports: - name: web @@ -1657,6 +1779,7 @@ apiVersion: apps/v1 kind: Deployment metadata: name: argo-server + namespace: argo spec: selector: matchLabels: @@ -1669,7 +1792,6 @@ spec: containers: - args: - server - - --namespaced - --auth-mode - server - --auth-mode @@ -1708,6 +1830,62 @@ spec: --- apiVersion: apps/v1 kind: Deployment +metadata: + name: workflow-controller + namespace: argo +spec: + selector: + matchLabels: + app: workflow-controller + template: + metadata: + labels: + app: workflow-controller + spec: + containers: + - args: + - --configmap + - workflow-controller-configmap + - --executor-image + - quay.io/codefresh/argoexec:latest + command: + - workflow-controller + env: + - name: LEADER_ELECTION_IDENTITY + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + image: quay.io/codefresh/workflow-controller:latest + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 6060 + initialDelaySeconds: 90 + periodSeconds: 60 + timeoutSeconds: 30 + name: workflow-controller + ports: + - containerPort: 9090 + name: metrics + - containerPort: 6060 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + nodeSelector: + kubernetes.io/os: linux + priorityClassName: workflow-controller + securityContext: + runAsNonRoot: true + serviceAccountName: argo +--- +apiVersion: apps/v1 +kind: Deployment metadata: labels: app: httpbin @@ -1795,59 +1973,3 @@ spec: port: 9000 initialDelaySeconds: 5 periodSeconds: 10 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: workflow-controller -spec: - selector: - matchLabels: - app: workflow-controller - template: - metadata: - labels: - app: 
workflow-controller - spec: - containers: - - args: - - --configmap - - workflow-controller-configmap - - --executor-image - - quay.io/codefresh/argoexec:latest - - --namespaced - command: - - workflow-controller - env: - - name: LEADER_ELECTION_IDENTITY - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: metadata.name - image: quay.io/codefresh/workflow-controller:latest - livenessProbe: - failureThreshold: 3 - httpGet: - path: /healthz - port: 6060 - initialDelaySeconds: 90 - periodSeconds: 60 - timeoutSeconds: 30 - name: workflow-controller - ports: - - containerPort: 9090 - name: metrics - - containerPort: 6060 - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - runAsNonRoot: true - nodeSelector: - kubernetes.io/os: linux - priorityClassName: workflow-controller - securityContext: - runAsNonRoot: true - serviceAccountName: argo diff --git a/manifests/quick-start-mysql.yaml b/manifests/quick-start-mysql.yaml index 24f7060631ab..c852b4176241 100644 --- a/manifests/quick-start-mysql.yaml +++ b/manifests/quick-start-mysql.yaml @@ -891,11 +891,13 @@ apiVersion: v1 kind: ServiceAccount metadata: name: argo + namespace: argo --- apiVersion: v1 kind: ServiceAccount metadata: name: argo-server + namespace: argo --- apiVersion: v1 kind: ServiceAccount @@ -904,6 +906,27 @@ metadata: --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role +metadata: + name: argo-role + namespace: argo +rules: +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update +- apiGroups: + - "" + resources: + - secrets + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role metadata: annotations: workflows.argoproj.io/description: | @@ -929,126 +952,211 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: - name: argo-role + annotations: + workflows.argoproj.io/description: | + This is the minimum recommended permissions needed if you want to use 
artifact GC. + name: artifactgc rules: - apiGroups: - - coordination.k8s.io + - argoproj.io resources: - - leases + - workflowartifactgctasks + verbs: + - list + - watch +- apiGroups: + - argoproj.io + resources: + - workflowartifactgctasks/status + verbs: + - patch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + annotations: + workflows.argoproj.io/description: | + Recomended minimum permissions for the `emissary` executor. + name: executor +rules: +- apiGroups: + - argoproj.io + resources: + - workflowtaskresults verbs: - create - - get - - update + - patch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + annotations: + workflows.argoproj.io/description: | + This is an example of the permissions you would need if you wanted to use a resource template to create and manage + other pods. The same pattern would be suitable for other resurces, e.g. a service + name: pod-manager +rules: - apiGroups: - "" resources: - pods - - pods/exec verbs: - create - get - - list - - watch - - update - patch - - delete +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: submit-workflow-template +rules: - apiGroups: - - "" + - argoproj.io resources: - - configmaps + - workfloweventbindings verbs: - - get - - watch - list - apiGroups: - - "" + - argoproj.io resources: - - persistentvolumeclaims - - persistentvolumeclaims/finalizers + - workflowtemplates verbs: - - create - - update - - delete - get - apiGroups: - argoproj.io resources: - workflows - - workflows/finalizers - - workflowtasksets - - workflowtasksets/finalizers - - workflowartifactgctasks verbs: - - get - - list - - watch - - update - - patch - - delete - create +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + annotations: + workflows.argoproj.io/description: | + This is an example of the permissions you would need if you wanted to use a resource template to create and manage + other workflows. 
The same pattern would be suitable for other resurces, e.g. a service + name: workflow-manager +rules: - apiGroups: - argoproj.io resources: - - workflowtemplates - - workflowtemplates/finalizers + - workflows verbs: + - create - get - - list - - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + rbac.authorization.k8s.io/aggregate-to-admin: "true" + name: argo-aggregate-to-admin +rules: - apiGroups: - argoproj.io resources: + - workflows + - workflows/finalizers + - workfloweventbindings + - workfloweventbindings/finalizers + - workflowtemplates + - workflowtemplates/finalizers + - cronworkflows + - cronworkflows/finalizers + - clusterworkflowtemplates + - clusterworkflowtemplates/finalizers + - workflowtasksets + - workflowtasksets/finalizers - workflowtaskresults + - workflowtaskresults/finalizers verbs: - - list - - watch + - create + - delete - deletecollection -- apiGroups: - - "" - resources: - - serviceaccounts - verbs: - get - list + - patch + - update + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + rbac.authorization.k8s.io/aggregate-to-edit: "true" + name: argo-aggregate-to-edit +rules: - apiGroups: - - "" + - argoproj.io resources: - - secrets + - workflows + - workflows/finalizers + - workfloweventbindings + - workfloweventbindings/finalizers + - workflowtemplates + - workflowtemplates/finalizers + - cronworkflows + - cronworkflows/finalizers + - clusterworkflowtemplates + - clusterworkflowtemplates/finalizers + - workflowtaskresults + - workflowtaskresults/finalizers verbs: + - create + - delete + - deletecollection - get + - list + - patch + - update + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + rbac.authorization.k8s.io/aggregate-to-view: "true" + name: argo-aggregate-to-view +rules: - apiGroups: - argoproj.io resources: + - workflows + - workflows/finalizers + - workfloweventbindings + - 
workfloweventbindings/finalizers + - workflowtemplates + - workflowtemplates/finalizers - cronworkflows - cronworkflows/finalizers + - clusterworkflowtemplates + - clusterworkflowtemplates/finalizers + - workflowtaskresults + - workflowtaskresults/finalizers verbs: - get - list - watch - - update - - patch - - delete +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: argo-cluster-role +rules: - apiGroups: - "" resources: - - events - verbs: - - create - - patch -- apiGroups: - - policy - resources: - - poddisruptionbudgets + - pods + - pods/exec verbs: - create - get + - list + - watch + - update + - patch - delete ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: argo-server-role -rules: - apiGroups: - "" resources: @@ -1060,29 +1168,48 @@ rules: - apiGroups: - "" resources: - - secrets + - persistentvolumeclaims + - persistentvolumeclaims/finalizers verbs: - - get - create + - update + - delete + - get - apiGroups: - - "" + - argoproj.io resources: - - pods - - pods/exec - - pods/log + - workflows + - workflows/finalizers + - workflowtasksets + - workflowtasksets/finalizers + - workflowartifactgctasks verbs: - get - list - watch + - update + - patch - delete + - create - apiGroups: - - "" + - argoproj.io resources: - - events + - workflowtemplates + - workflowtemplates/finalizers + - clusterworkflowtemplates + - clusterworkflowtemplates/finalizers verbs: + - get + - list - watch - - create - - patch +- apiGroups: + - argoproj.io + resources: + - workflowtaskresults + verbs: + - list + - watch + - deletecollection - apiGroups: - "" resources: @@ -1090,137 +1217,114 @@ rules: verbs: - get - list - - watch - apiGroups: - argoproj.io resources: - - eventsources - - sensors - - workflows - - workfloweventbindings - - workflowtemplates - cronworkflows - cronworkflows/finalizers verbs: - - create - get - list - watch - update - patch - delete ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role 
-metadata: - annotations: - workflows.argoproj.io/description: | - This is the minimum recommended permissions needed if you want to use artifact GC. - name: artifactgc -rules: -- apiGroups: - - argoproj.io - resources: - - workflowartifactgctasks - verbs: - - list - - watch - apiGroups: - - argoproj.io + - "" resources: - - workflowartifactgctasks/status + - events verbs: + - create - patch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - annotations: - workflows.argoproj.io/description: | - Recomended minimum permissions for the `emissary` executor. - name: executor -rules: - apiGroups: - - argoproj.io + - policy resources: - - workflowtaskresults + - poddisruptionbudgets verbs: - create - - patch + - get + - delete --- apiVersion: rbac.authorization.k8s.io/v1 -kind: Role +kind: ClusterRole metadata: - annotations: - workflows.argoproj.io/description: | - This is an example of the permissions you would need if you wanted to use a resource template to create and manage - other pods. The same pattern would be suitable for other resurces, e.g. 
a service - name: pod-manager + name: argo-clusterworkflowtemplate-role rules: - apiGroups: - - "" + - argoproj.io resources: - - pods + - clusterworkflowtemplates + - clusterworkflowtemplates/finalizers verbs: - - create - get - - patch + - list + - watch --- apiVersion: rbac.authorization.k8s.io/v1 -kind: Role +kind: ClusterRole metadata: - name: submit-workflow-template + name: argo-server-cluster-role rules: - apiGroups: - - argoproj.io + - "" resources: - - workfloweventbindings + - configmaps verbs: + - get + - watch - list - apiGroups: - - argoproj.io + - "" resources: - - workflowtemplates + - secrets verbs: - get + - create - apiGroups: - - argoproj.io + - "" resources: - - workflows + - pods + - pods/exec + - pods/log verbs: - - create ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - annotations: - workflows.argoproj.io/description: | - This is an example of the permissions you would need if you wanted to use a resource template to create and manage - other workflows. The same pattern would be suitable for other resurces, e.g. 
a service - name: workflow-manager -rules: + - get + - list + - watch + - delete - apiGroups: - - argoproj.io + - "" resources: - - workflows + - events verbs: + - watch - create + - patch +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: - get ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: argo-clusterworkflowtemplate-role -rules: + - list + - watch - apiGroups: - argoproj.io resources: + - eventsources + - sensors + - workflows + - workfloweventbindings + - workflowtemplates + - cronworkflows - clusterworkflowtemplates - - clusterworkflowtemplates/finalizers verbs: + - create - get - list - watch + - update + - patch + - delete --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole @@ -1242,20 +1346,9 @@ rules: --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding -metadata: - name: agent-default -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: agent -subjects: -- kind: ServiceAccount - name: default ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding metadata: name: argo-binding + namespace: argo roleRef: apiGroup: rbac.authorization.k8s.io kind: Role @@ -1263,18 +1356,19 @@ roleRef: subjects: - kind: ServiceAccount name: argo + namespace: argo --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: - name: argo-server-binding + name: agent-default roleRef: apiGroup: rbac.authorization.k8s.io kind: Role - name: argo-server-role + name: agent subjects: - kind: ServiceAccount - name: argo-server + name: default --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding @@ -1339,6 +1433,19 @@ subjects: --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + name: argo-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: argo-cluster-role +subjects: +- kind: ServiceAccount + name: argo + namespace: argo +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: 
name: argo-clusterworkflowtemplate-role-binding roleRef: @@ -1353,48 +1460,28 @@ subjects: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: argo-server-clusterworkflowtemplate-role-binding + name: argo-server-binding roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: argo-server-clusterworkflowtemplate-role + name: argo-server-cluster-role subjects: - kind: ServiceAccount name: argo-server namespace: argo --- -apiVersion: v1 -data: - default-v1: | - archiveLogs: true - s3: - bucket: my-bucket - endpoint: minio:9000 - insecure: true - accessKeySecret: - name: my-minio-cred - key: accesskey - secretKeySecret: - name: my-minio-cred - key: secretkey - empty: "" - my-key: | - archiveLogs: true - s3: - bucket: my-bucket - endpoint: minio:9000 - insecure: true - accessKeySecret: - name: my-minio-cred - key: accesskey - secretKeySecret: - name: my-minio-cred - key: secretkey -kind: ConfigMap +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: - annotations: - workflows.argoproj.io/default-artifact-repository: default-v1 - name: artifact-repositories + name: argo-server-clusterworkflowtemplate-role-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: argo-server-clusterworkflowtemplate-role +subjects: +- kind: ServiceAccount + name: argo-server + namespace: argo --- apiVersion: v1 data: @@ -1471,6 +1558,40 @@ data: kind: ConfigMap metadata: name: workflow-controller-configmap + namespace: argo +--- +apiVersion: v1 +data: + default-v1: | + archiveLogs: true + s3: + bucket: my-bucket + endpoint: minio:9000 + insecure: true + accessKeySecret: + name: my-minio-cred + key: accesskey + secretKeySecret: + name: my-minio-cred + key: secretkey + empty: "" + my-key: | + archiveLogs: true + s3: + bucket: my-bucket + endpoint: minio:9000 + insecure: true + accessKeySecret: + name: my-minio-cred + key: accesskey + secretKeySecret: + name: my-minio-cred + key: secretkey +kind: 
ConfigMap +metadata: + annotations: + workflows.argoproj.io/default-artifact-repository: default-v1 + name: artifact-repositories --- apiVersion: v1 kind: Secret @@ -1635,6 +1756,7 @@ apiVersion: v1 kind: Service metadata: name: argo-server + namespace: argo spec: ports: - name: web @@ -1701,6 +1823,7 @@ apiVersion: apps/v1 kind: Deployment metadata: name: argo-server + namespace: argo spec: selector: matchLabels: @@ -1713,7 +1836,6 @@ spec: containers: - args: - server - - --namespaced - --auth-mode - server - --auth-mode @@ -1752,6 +1874,62 @@ spec: --- apiVersion: apps/v1 kind: Deployment +metadata: + name: workflow-controller + namespace: argo +spec: + selector: + matchLabels: + app: workflow-controller + template: + metadata: + labels: + app: workflow-controller + spec: + containers: + - args: + - --configmap + - workflow-controller-configmap + - --executor-image + - quay.io/codefresh/argoexec:latest + command: + - workflow-controller + env: + - name: LEADER_ELECTION_IDENTITY + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + image: quay.io/codefresh/workflow-controller:latest + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 6060 + initialDelaySeconds: 90 + periodSeconds: 60 + timeoutSeconds: 30 + name: workflow-controller + ports: + - containerPort: 9090 + name: metrics + - containerPort: 6060 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + nodeSelector: + kubernetes.io/os: linux + priorityClassName: workflow-controller + securityContext: + runAsNonRoot: true + serviceAccountName: argo +--- +apiVersion: apps/v1 +kind: Deployment metadata: labels: app: httpbin @@ -1878,59 +2056,3 @@ spec: port: 3306 nodeSelector: kubernetes.io/os: linux ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: workflow-controller -spec: - selector: - matchLabels: - app: workflow-controller - template: - metadata: - labels: - app: 
workflow-controller - spec: - containers: - - args: - - --configmap - - workflow-controller-configmap - - --executor-image - - quay.io/codefresh/argoexec:latest - - --namespaced - command: - - workflow-controller - env: - - name: LEADER_ELECTION_IDENTITY - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: metadata.name - image: quay.io/codefresh/workflow-controller:latest - livenessProbe: - failureThreshold: 3 - httpGet: - path: /healthz - port: 6060 - initialDelaySeconds: 90 - periodSeconds: 60 - timeoutSeconds: 30 - name: workflow-controller - ports: - - containerPort: 9090 - name: metrics - - containerPort: 6060 - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - runAsNonRoot: true - nodeSelector: - kubernetes.io/os: linux - priorityClassName: workflow-controller - securityContext: - runAsNonRoot: true - serviceAccountName: argo diff --git a/manifests/quick-start-postgres.yaml b/manifests/quick-start-postgres.yaml index 034c609656b2..eb905b9f2891 100644 --- a/manifests/quick-start-postgres.yaml +++ b/manifests/quick-start-postgres.yaml @@ -891,11 +891,13 @@ apiVersion: v1 kind: ServiceAccount metadata: name: argo + namespace: argo --- apiVersion: v1 kind: ServiceAccount metadata: name: argo-server + namespace: argo --- apiVersion: v1 kind: ServiceAccount @@ -904,6 +906,27 @@ metadata: --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role +metadata: + name: argo-role + namespace: argo +rules: +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update +- apiGroups: + - "" + resources: + - secrets + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role metadata: annotations: workflows.argoproj.io/description: | @@ -929,126 +952,211 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: - name: argo-role + annotations: + workflows.argoproj.io/description: | + This is the minimum recommended permissions needed if you 
want to use artifact GC. + name: artifactgc rules: - apiGroups: - - coordination.k8s.io + - argoproj.io resources: - - leases + - workflowartifactgctasks + verbs: + - list + - watch +- apiGroups: + - argoproj.io + resources: + - workflowartifactgctasks/status + verbs: + - patch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + annotations: + workflows.argoproj.io/description: | + Recomended minimum permissions for the `emissary` executor. + name: executor +rules: +- apiGroups: + - argoproj.io + resources: + - workflowtaskresults verbs: - create - - get - - update + - patch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + annotations: + workflows.argoproj.io/description: | + This is an example of the permissions you would need if you wanted to use a resource template to create and manage + other pods. The same pattern would be suitable for other resurces, e.g. a service + name: pod-manager +rules: - apiGroups: - "" resources: - pods - - pods/exec verbs: - create - get - - list - - watch - - update - patch - - delete +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: submit-workflow-template +rules: - apiGroups: - - "" + - argoproj.io resources: - - configmaps + - workfloweventbindings verbs: - - get - - watch - list - apiGroups: - - "" + - argoproj.io resources: - - persistentvolumeclaims - - persistentvolumeclaims/finalizers + - workflowtemplates verbs: - - create - - update - - delete - get - apiGroups: - argoproj.io resources: - workflows - - workflows/finalizers - - workflowtasksets - - workflowtasksets/finalizers - - workflowartifactgctasks verbs: - - get - - list - - watch - - update - - patch - - delete - create +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + annotations: + workflows.argoproj.io/description: | + This is an example of the permissions you would need if you wanted to use a resource template to create and manage + other workflows. 
The same pattern would be suitable for other resurces, e.g. a service + name: workflow-manager +rules: - apiGroups: - argoproj.io resources: - - workflowtemplates - - workflowtemplates/finalizers + - workflows verbs: + - create - get - - list - - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + rbac.authorization.k8s.io/aggregate-to-admin: "true" + name: argo-aggregate-to-admin +rules: - apiGroups: - argoproj.io resources: + - workflows + - workflows/finalizers + - workfloweventbindings + - workfloweventbindings/finalizers + - workflowtemplates + - workflowtemplates/finalizers + - cronworkflows + - cronworkflows/finalizers + - clusterworkflowtemplates + - clusterworkflowtemplates/finalizers + - workflowtasksets + - workflowtasksets/finalizers - workflowtaskresults + - workflowtaskresults/finalizers verbs: - - list - - watch + - create + - delete - deletecollection -- apiGroups: - - "" - resources: - - serviceaccounts - verbs: - get - list + - patch + - update + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + rbac.authorization.k8s.io/aggregate-to-edit: "true" + name: argo-aggregate-to-edit +rules: - apiGroups: - - "" + - argoproj.io resources: - - secrets + - workflows + - workflows/finalizers + - workfloweventbindings + - workfloweventbindings/finalizers + - workflowtemplates + - workflowtemplates/finalizers + - cronworkflows + - cronworkflows/finalizers + - clusterworkflowtemplates + - clusterworkflowtemplates/finalizers + - workflowtaskresults + - workflowtaskresults/finalizers verbs: + - create + - delete + - deletecollection - get + - list + - patch + - update + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + rbac.authorization.k8s.io/aggregate-to-view: "true" + name: argo-aggregate-to-view +rules: - apiGroups: - argoproj.io resources: + - workflows + - workflows/finalizers + - workfloweventbindings + - 
workfloweventbindings/finalizers + - workflowtemplates + - workflowtemplates/finalizers - cronworkflows - cronworkflows/finalizers + - clusterworkflowtemplates + - clusterworkflowtemplates/finalizers + - workflowtaskresults + - workflowtaskresults/finalizers verbs: - get - list - watch - - update - - patch - - delete +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: argo-cluster-role +rules: - apiGroups: - "" resources: - - events - verbs: - - create - - patch -- apiGroups: - - policy - resources: - - poddisruptionbudgets + - pods + - pods/exec verbs: - create - get + - list + - watch + - update + - patch - delete ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: argo-server-role -rules: - apiGroups: - "" resources: @@ -1060,29 +1168,48 @@ rules: - apiGroups: - "" resources: - - secrets + - persistentvolumeclaims + - persistentvolumeclaims/finalizers verbs: - - get - create + - update + - delete + - get - apiGroups: - - "" + - argoproj.io resources: - - pods - - pods/exec - - pods/log + - workflows + - workflows/finalizers + - workflowtasksets + - workflowtasksets/finalizers + - workflowartifactgctasks verbs: - get - list - watch + - update + - patch - delete + - create - apiGroups: - - "" + - argoproj.io resources: - - events + - workflowtemplates + - workflowtemplates/finalizers + - clusterworkflowtemplates + - clusterworkflowtemplates/finalizers verbs: + - get + - list - watch - - create - - patch +- apiGroups: + - argoproj.io + resources: + - workflowtaskresults + verbs: + - list + - watch + - deletecollection - apiGroups: - "" resources: @@ -1090,137 +1217,114 @@ rules: verbs: - get - list - - watch - apiGroups: - argoproj.io resources: - - eventsources - - sensors - - workflows - - workfloweventbindings - - workflowtemplates - cronworkflows - cronworkflows/finalizers verbs: - - create - get - list - watch - update - patch - delete ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role 
-metadata: - annotations: - workflows.argoproj.io/description: | - This is the minimum recommended permissions needed if you want to use artifact GC. - name: artifactgc -rules: -- apiGroups: - - argoproj.io - resources: - - workflowartifactgctasks - verbs: - - list - - watch - apiGroups: - - argoproj.io + - "" resources: - - workflowartifactgctasks/status + - events verbs: + - create - patch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - annotations: - workflows.argoproj.io/description: | - Recomended minimum permissions for the `emissary` executor. - name: executor -rules: - apiGroups: - - argoproj.io + - policy resources: - - workflowtaskresults + - poddisruptionbudgets verbs: - create - - patch + - get + - delete --- apiVersion: rbac.authorization.k8s.io/v1 -kind: Role +kind: ClusterRole metadata: - annotations: - workflows.argoproj.io/description: | - This is an example of the permissions you would need if you wanted to use a resource template to create and manage - other pods. The same pattern would be suitable for other resurces, e.g. 
a service - name: pod-manager + name: argo-clusterworkflowtemplate-role rules: - apiGroups: - - "" + - argoproj.io resources: - - pods + - clusterworkflowtemplates + - clusterworkflowtemplates/finalizers verbs: - - create - get - - patch + - list + - watch --- apiVersion: rbac.authorization.k8s.io/v1 -kind: Role +kind: ClusterRole metadata: - name: submit-workflow-template + name: argo-server-cluster-role rules: - apiGroups: - - argoproj.io + - "" resources: - - workfloweventbindings + - configmaps verbs: + - get + - watch - list - apiGroups: - - argoproj.io + - "" resources: - - workflowtemplates + - secrets verbs: - get + - create - apiGroups: - - argoproj.io + - "" resources: - - workflows + - pods + - pods/exec + - pods/log verbs: - - create ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - annotations: - workflows.argoproj.io/description: | - This is an example of the permissions you would need if you wanted to use a resource template to create and manage - other workflows. The same pattern would be suitable for other resurces, e.g. 
a service - name: workflow-manager -rules: + - get + - list + - watch + - delete - apiGroups: - - argoproj.io + - "" resources: - - workflows + - events verbs: + - watch - create + - patch +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: - get ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: argo-clusterworkflowtemplate-role -rules: + - list + - watch - apiGroups: - argoproj.io resources: + - eventsources + - sensors + - workflows + - workfloweventbindings + - workflowtemplates + - cronworkflows - clusterworkflowtemplates - - clusterworkflowtemplates/finalizers verbs: + - create - get - list - watch + - update + - patch + - delete --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole @@ -1242,20 +1346,9 @@ rules: --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding -metadata: - name: agent-default -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: agent -subjects: -- kind: ServiceAccount - name: default ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding metadata: name: argo-binding + namespace: argo roleRef: apiGroup: rbac.authorization.k8s.io kind: Role @@ -1263,18 +1356,19 @@ roleRef: subjects: - kind: ServiceAccount name: argo + namespace: argo --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: - name: argo-server-binding + name: agent-default roleRef: apiGroup: rbac.authorization.k8s.io kind: Role - name: argo-server-role + name: agent subjects: - kind: ServiceAccount - name: argo-server + name: default --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding @@ -1339,6 +1433,19 @@ subjects: --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + name: argo-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: argo-cluster-role +subjects: +- kind: ServiceAccount + name: argo + namespace: argo +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: 
name: argo-clusterworkflowtemplate-role-binding roleRef: @@ -1353,48 +1460,28 @@ subjects: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: argo-server-clusterworkflowtemplate-role-binding + name: argo-server-binding roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: argo-server-clusterworkflowtemplate-role + name: argo-server-cluster-role subjects: - kind: ServiceAccount name: argo-server namespace: argo --- -apiVersion: v1 -data: - default-v1: | - archiveLogs: true - s3: - bucket: my-bucket - endpoint: minio:9000 - insecure: true - accessKeySecret: - name: my-minio-cred - key: accesskey - secretKeySecret: - name: my-minio-cred - key: secretkey - empty: "" - my-key: | - archiveLogs: true - s3: - bucket: my-bucket - endpoint: minio:9000 - insecure: true - accessKeySecret: - name: my-minio-cred - key: accesskey - secretKeySecret: - name: my-minio-cred - key: secretkey -kind: ConfigMap +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: - annotations: - workflows.argoproj.io/default-artifact-repository: default-v1 - name: artifact-repositories + name: argo-server-clusterworkflowtemplate-role-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: argo-server-clusterworkflowtemplate-role +subjects: +- kind: ServiceAccount + name: argo-server + namespace: argo --- apiVersion: v1 data: @@ -1471,6 +1558,40 @@ data: kind: ConfigMap metadata: name: workflow-controller-configmap + namespace: argo +--- +apiVersion: v1 +data: + default-v1: | + archiveLogs: true + s3: + bucket: my-bucket + endpoint: minio:9000 + insecure: true + accessKeySecret: + name: my-minio-cred + key: accesskey + secretKeySecret: + name: my-minio-cred + key: secretkey + empty: "" + my-key: | + archiveLogs: true + s3: + bucket: my-bucket + endpoint: minio:9000 + insecure: true + accessKeySecret: + name: my-minio-cred + key: accesskey + secretKeySecret: + name: my-minio-cred + key: secretkey +kind: 
ConfigMap +metadata: + annotations: + workflows.argoproj.io/default-artifact-repository: default-v1 + name: artifact-repositories --- apiVersion: v1 kind: Secret @@ -1635,6 +1756,7 @@ apiVersion: v1 kind: Service metadata: name: argo-server + namespace: argo spec: ports: - name: web @@ -1701,6 +1823,7 @@ apiVersion: apps/v1 kind: Deployment metadata: name: argo-server + namespace: argo spec: selector: matchLabels: @@ -1713,7 +1836,6 @@ spec: containers: - args: - server - - --namespaced - --auth-mode - server - --auth-mode @@ -1752,6 +1874,62 @@ spec: --- apiVersion: apps/v1 kind: Deployment +metadata: + name: workflow-controller + namespace: argo +spec: + selector: + matchLabels: + app: workflow-controller + template: + metadata: + labels: + app: workflow-controller + spec: + containers: + - args: + - --configmap + - workflow-controller-configmap + - --executor-image + - quay.io/codefresh/argoexec:latest + command: + - workflow-controller + env: + - name: LEADER_ELECTION_IDENTITY + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + image: quay.io/codefresh/workflow-controller:latest + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 6060 + initialDelaySeconds: 90 + periodSeconds: 60 + timeoutSeconds: 30 + name: workflow-controller + ports: + - containerPort: 9090 + name: metrics + - containerPort: 6060 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + nodeSelector: + kubernetes.io/os: linux + priorityClassName: workflow-controller + securityContext: + runAsNonRoot: true + serviceAccountName: argo +--- +apiVersion: apps/v1 +kind: Deployment metadata: labels: app: httpbin @@ -1876,59 +2054,3 @@ spec: timeoutSeconds: 2 nodeSelector: kubernetes.io/os: linux ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: workflow-controller -spec: - selector: - matchLabels: - app: workflow-controller - template: - metadata: - labels: - app: 
workflow-controller - spec: - containers: - - args: - - --configmap - - workflow-controller-configmap - - --executor-image - - quay.io/codefresh/argoexec:latest - - --namespaced - command: - - workflow-controller - env: - - name: LEADER_ELECTION_IDENTITY - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: metadata.name - image: quay.io/codefresh/workflow-controller:latest - livenessProbe: - failureThreshold: 3 - httpGet: - path: /healthz - port: 6060 - initialDelaySeconds: 90 - periodSeconds: 60 - timeoutSeconds: 30 - name: workflow-controller - ports: - - containerPort: 9090 - name: metrics - - containerPort: 6060 - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - runAsNonRoot: true - nodeSelector: - kubernetes.io/os: linux - priorityClassName: workflow-controller - securityContext: - runAsNonRoot: true - serviceAccountName: argo diff --git a/manifests/quick-start/base/kustomization.yaml b/manifests/quick-start/base/kustomization.yaml index 317a8c472904..75a3516262b3 100644 --- a/manifests/quick-start/base/kustomization.yaml +++ b/manifests/quick-start/base/kustomization.yaml @@ -2,7 +2,7 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: - - ../../namespace-install + - ../../cluster-install - minio - httpbin - webhooks diff --git a/manifests/quick-start/base/overlays/argo-server-deployment.yaml b/manifests/quick-start/base/overlays/argo-server-deployment.yaml index de2bd7d54bef..698e9dc36aaa 100644 --- a/manifests/quick-start/base/overlays/argo-server-deployment.yaml +++ b/manifests/quick-start/base/overlays/argo-server-deployment.yaml @@ -9,7 +9,6 @@ spec: - name: argo-server args: - server - - --namespaced - --auth-mode - server - --auth-mode diff --git a/persist/sqldb/archived_workflow_labels.go b/persist/sqldb/archived_workflow_labels.go index 04ce353ce209..add2bbad4bf3 100644 --- a/persist/sqldb/archived_workflow_labels.go +++ 
b/persist/sqldb/archived_workflow_labels.go @@ -52,9 +52,9 @@ func (r *workflowArchive) ListWorkflowsLabelValues(key string) (*wfv1.LabelValue return &wfv1.LabelValues{Items: labels}, nil } -func labelsClause(selector db.Selector, t dbType, requirements labels.Requirements) (db.Selector, error) { +func labelsClause(selector db.Selector, t dbType, requirements labels.Requirements, tableName, labelTableName string, hasClusterName bool) (db.Selector, error) { for _, req := range requirements { - cond, err := requirementToCondition(t, req) + cond, err := requirementToCondition(t, req, tableName, labelTableName, hasClusterName) if err != nil { return nil, err } @@ -63,36 +63,40 @@ func labelsClause(selector db.Selector, t dbType, requirements labels.Requiremen return selector, nil } -func requirementToCondition(t dbType, r labels.Requirement) (*db.RawExpr, error) { +func requirementToCondition(t dbType, r labels.Requirement, tableName, labelTableName string, hasClusterName bool) (*db.RawExpr, error) { + clusterNameSelector := "" + if hasClusterName { + clusterNameSelector = fmt.Sprintf("clustername = %s.clustername and", tableName) + } // Should we "sanitize our inputs"? No. // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // Valid label values must be 63 characters or less and must be empty or begin and end with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between. 
// https://kb.objectrocket.com/postgresql/casting-in-postgresql-570#string+to+integer+casting switch r.Operator() { case selection.DoesNotExist: - return db.Raw(fmt.Sprintf("not exists (select 1 from %s where clustername = %s.clustername and uid = %s.uid and name = '%s')", archiveLabelsTableName, archiveTableName, archiveTableName, r.Key())), nil + return db.Raw(fmt.Sprintf("not exists (select 1 from %s where %s uid = %s.uid and name = '%s')", labelTableName, clusterNameSelector, tableName, r.Key())), nil case selection.Equals, selection.DoubleEquals: - return db.Raw(fmt.Sprintf("exists (select 1 from %s where clustername = %s.clustername and uid = %s.uid and name = '%s' and value = '%s')", archiveLabelsTableName, archiveTableName, archiveTableName, r.Key(), r.Values().List()[0])), nil + return db.Raw(fmt.Sprintf("exists (select 1 from %s where %s uid = %s.uid and name = '%s' and value = '%s')", labelTableName, clusterNameSelector, tableName, r.Key(), r.Values().List()[0])), nil case selection.In: - return db.Raw(fmt.Sprintf("exists (select 1 from %s where clustername = %s.clustername and uid = %s.uid and name = '%s' and value in ('%s'))", archiveLabelsTableName, archiveTableName, archiveTableName, r.Key(), strings.Join(r.Values().List(), "', '"))), nil + return db.Raw(fmt.Sprintf("exists (select 1 from %s where %s uid = %s.uid and name = '%s' and value in ('%s'))", labelTableName, clusterNameSelector, tableName, r.Key(), strings.Join(r.Values().List(), "', '"))), nil case selection.NotEquals: - return db.Raw(fmt.Sprintf("not exists (select 1 from %s where clustername = %s.clustername and uid = %s.uid and name = '%s' and value = '%s')", archiveLabelsTableName, archiveTableName, archiveTableName, r.Key(), r.Values().List()[0])), nil + return db.Raw(fmt.Sprintf("not exists (select 1 from %s where %s uid = %s.uid and name = '%s' and value = '%s')", labelTableName, clusterNameSelector, tableName, r.Key(), r.Values().List()[0])), nil case selection.NotIn: - return 
db.Raw(fmt.Sprintf("not exists (select 1 from %s where clustername = %s.clustername and uid = %s.uid and name = '%s' and value in ('%s'))", archiveLabelsTableName, archiveTableName, archiveTableName, r.Key(), strings.Join(r.Values().List(), "', '"))), nil + return db.Raw(fmt.Sprintf("not exists (select 1 from %s where %s uid = %s.uid and name = '%s' and value in ('%s'))", labelTableName, clusterNameSelector, tableName, r.Key(), strings.Join(r.Values().List(), "', '"))), nil case selection.Exists: - return db.Raw(fmt.Sprintf("exists (select 1 from %s where clustername = %s.clustername and uid = %s.uid and name = '%s')", archiveLabelsTableName, archiveTableName, archiveTableName, r.Key())), nil + return db.Raw(fmt.Sprintf("exists (select 1 from %s where %s uid = %s.uid and name = '%s')", labelTableName, clusterNameSelector, tableName, r.Key())), nil case selection.GreaterThan: i, err := strconv.Atoi(r.Values().List()[0]) if err != nil { return nil, err } - return db.Raw(fmt.Sprintf("exists (select 1 from %s where clustername = %s.clustername and uid = %s.uid and name = '%s' and cast(value as %s) > %d)", archiveLabelsTableName, archiveTableName, archiveTableName, r.Key(), t.intType(), i)), nil + return db.Raw(fmt.Sprintf("exists (select 1 from %s where %s uid = %s.uid and name = '%s' and cast(value as %s) > %d)", labelTableName, clusterNameSelector, tableName, r.Key(), t.intType(), i)), nil case selection.LessThan: i, err := strconv.Atoi(r.Values().List()[0]) if err != nil { return nil, err } - return db.Raw(fmt.Sprintf("exists (select 1 from %s where clustername = %s.clustername and uid = %s.uid and name = '%s' and cast(value as %s) < %d)", archiveLabelsTableName, archiveTableName, archiveTableName, r.Key(), t.intType(), i)), nil + return db.Raw(fmt.Sprintf("exists (select 1 from %s where %s uid = %s.uid and name = '%s' and cast(value as %s) < %d)", labelTableName, clusterNameSelector, tableName, r.Key(), t.intType(), i)), nil } return nil, fmt.Errorf("operation %v 
is not supported", r.Operator()) } diff --git a/persist/sqldb/archived_workflow_labels_test.go b/persist/sqldb/archived_workflow_labels_test.go index 144212cfae2e..61f0ca447d1f 100644 --- a/persist/sqldb/archived_workflow_labels_test.go +++ b/persist/sqldb/archived_workflow_labels_test.go @@ -31,7 +31,7 @@ func Test_labelsClause(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { for _, req := range tt.requirements { - got, err := requirementToCondition(tt.dbType, req) + got, err := requirementToCondition(tt.dbType, req, archiveTableName, archiveLabelsTableName, true) if assert.NoError(t, err) { assert.Equal(t, tt.want, *got) } diff --git a/persist/sqldb/db_type.go b/persist/sqldb/db_type.go index edf590ed7bf5..258eedb087f3 100644 --- a/persist/sqldb/db_type.go +++ b/persist/sqldb/db_type.go @@ -12,6 +12,7 @@ type dbType string const ( MySQL dbType = "mysql" Postgres dbType = "postgres" + SQLite dbType = "sqlite" ) func dbTypeFor(session db.Session) dbType { diff --git a/persist/sqldb/mocks/WorkflowArchive.go b/persist/sqldb/mocks/WorkflowArchive.go index 16edf80c38fd..634961944077 100644 --- a/persist/sqldb/mocks/WorkflowArchive.go +++ b/persist/sqldb/mocks/WorkflowArchive.go @@ -4,10 +4,11 @@ package mocks import ( mock "github.com/stretchr/testify/mock" - labels "k8s.io/apimachinery/pkg/labels" time "time" + utils "github.com/argoproj/argo-workflows/v3/server/utils" + v1alpha1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" ) @@ -30,23 +31,23 @@ func (_m *WorkflowArchive) ArchiveWorkflow(wf *v1alpha1.Workflow) error { return r0 } -// CountWorkflows provides a mock function with given fields: namespace, name, namePrefix, minStartAt, maxStartAt, labelRequirements -func (_m *WorkflowArchive) CountWorkflows(namespace string, name string, namePrefix string, minStartAt time.Time, maxStartAt time.Time, labelRequirements labels.Requirements) (int64, error) { - ret := _m.Called(namespace, name, namePrefix, minStartAt, 
maxStartAt, labelRequirements) +// CountWorkflows provides a mock function with given fields: options +func (_m *WorkflowArchive) CountWorkflows(options utils.ListOptions) (int64, error) { + ret := _m.Called(options) var r0 int64 var r1 error - if rf, ok := ret.Get(0).(func(string, string, string, time.Time, time.Time, labels.Requirements) (int64, error)); ok { - return rf(namespace, name, namePrefix, minStartAt, maxStartAt, labelRequirements) + if rf, ok := ret.Get(0).(func(utils.ListOptions) (int64, error)); ok { + return rf(options) } - if rf, ok := ret.Get(0).(func(string, string, string, time.Time, time.Time, labels.Requirements) int64); ok { - r0 = rf(namespace, name, namePrefix, minStartAt, maxStartAt, labelRequirements) + if rf, ok := ret.Get(0).(func(utils.ListOptions) int64); ok { + r0 = rf(options) } else { r0 = ret.Get(0).(int64) } - if rf, ok := ret.Get(1).(func(string, string, string, time.Time, time.Time, labels.Requirements) error); ok { - r1 = rf(namespace, name, namePrefix, minStartAt, maxStartAt, labelRequirements) + if rf, ok := ret.Get(1).(func(utils.ListOptions) error); ok { + r1 = rf(options) } else { r1 = ret.Error(1) } @@ -122,25 +123,25 @@ func (_m *WorkflowArchive) IsEnabled() bool { return r0 } -// ListWorkflows provides a mock function with given fields: namespace, name, namePrefix, minStartAt, maxStartAt, labelRequirements, limit, offset -func (_m *WorkflowArchive) ListWorkflows(namespace string, name string, namePrefix string, minStartAt time.Time, maxStartAt time.Time, labelRequirements labels.Requirements, limit int, offset int) (v1alpha1.Workflows, error) { - ret := _m.Called(namespace, name, namePrefix, minStartAt, maxStartAt, labelRequirements, limit, offset) +// ListWorkflows provides a mock function with given fields: options +func (_m *WorkflowArchive) ListWorkflows(options utils.ListOptions) (v1alpha1.Workflows, error) { + ret := _m.Called(options) var r0 v1alpha1.Workflows var r1 error - if rf, ok := ret.Get(0).(func(string, 
string, string, time.Time, time.Time, labels.Requirements, int, int) (v1alpha1.Workflows, error)); ok { - return rf(namespace, name, namePrefix, minStartAt, maxStartAt, labelRequirements, limit, offset) + if rf, ok := ret.Get(0).(func(utils.ListOptions) (v1alpha1.Workflows, error)); ok { + return rf(options) } - if rf, ok := ret.Get(0).(func(string, string, string, time.Time, time.Time, labels.Requirements, int, int) v1alpha1.Workflows); ok { - r0 = rf(namespace, name, namePrefix, minStartAt, maxStartAt, labelRequirements, limit, offset) + if rf, ok := ret.Get(0).(func(utils.ListOptions) v1alpha1.Workflows); ok { + r0 = rf(options) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(v1alpha1.Workflows) } } - if rf, ok := ret.Get(1).(func(string, string, string, time.Time, time.Time, labels.Requirements, int, int) error); ok { - r1 = rf(namespace, name, namePrefix, minStartAt, maxStartAt, labelRequirements, limit, offset) + if rf, ok := ret.Get(1).(func(utils.ListOptions) error); ok { + r1 = rf(options) } else { r1 = ret.Error(1) } diff --git a/persist/sqldb/null_workflow_archive.go b/persist/sqldb/null_workflow_archive.go index e8e37b481c9f..e3f4863bcc7c 100644 --- a/persist/sqldb/null_workflow_archive.go +++ b/persist/sqldb/null_workflow_archive.go @@ -4,9 +4,8 @@ import ( "fmt" "time" - "k8s.io/apimachinery/pkg/labels" - wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" + sutils "github.com/argoproj/argo-workflows/v3/server/utils" ) var NullWorkflowArchive WorkflowArchive = &nullWorkflowArchive{} @@ -21,11 +20,11 @@ func (r *nullWorkflowArchive) ArchiveWorkflow(*wfv1.Workflow) error { return nil } -func (r *nullWorkflowArchive) ListWorkflows(string, string, string, time.Time, time.Time, labels.Requirements, int, int) (wfv1.Workflows, error) { +func (r *nullWorkflowArchive) ListWorkflows(options sutils.ListOptions) (wfv1.Workflows, error) { return wfv1.Workflows{}, nil } -func (r *nullWorkflowArchive) CountWorkflows(string, string, string, 
time.Time, time.Time, labels.Requirements) (int64, error) { +func (r *nullWorkflowArchive) CountWorkflows(options sutils.ListOptions) (int64, error) { return 0, nil } diff --git a/persist/sqldb/selector.go b/persist/sqldb/selector.go new file mode 100644 index 000000000000..5e2b9cbb53ca --- /dev/null +++ b/persist/sqldb/selector.go @@ -0,0 +1,89 @@ +package sqldb + +import ( + "github.com/upper/db/v4" + + "github.com/argoproj/argo-workflows/v3/server/utils" +) + +func BuildArchivedWorkflowSelector(selector db.Selector, tableName, labelTableName string, t dbType, options utils.ListOptions, count bool) (db.Selector, error) { + selector = selector. + And(namespaceEqual(options.Namespace)). + And(nameEqual(options.Name)). + And(namePrefixClause(options.NamePrefix)). + And(startedAtFromClause(options.MinStartedAt)). + And(startedAtToClause(options.MaxStartedAt)) + + selector, err := labelsClause(selector, t, options.LabelRequirements, tableName, labelTableName, true) + if err != nil { + return nil, err + } + if count { + return selector, nil + } + // If we were passed 0 as the limit, then we should load all available archived workflows + // to match the behavior of the `List` operations in the Kubernetes API + if options.Limit == 0 { + options.Limit = -1 + options.Offset = -1 + } + return selector. + OrderBy("-startedat"). + Limit(options.Limit). 
+ Offset(options.Offset), nil +} + +func BuildWorkflowSelector(in string, inArgs []any, tableName, labelTableName string, t dbType, options utils.ListOptions, count bool) (out string, outArgs []any, err error) { + var clauses []*db.RawExpr + if options.Namespace != "" { + clauses = append(clauses, db.Raw("namespace = ?", options.Namespace)) + } + if options.Name != "" { + clauses = append(clauses, db.Raw("name = ?", options.Name)) + } + if options.NamePrefix != "" { + clauses = append(clauses, db.Raw("name like ?", options.NamePrefix+"%")) + } + if !options.MinStartedAt.IsZero() { + clauses = append(clauses, db.Raw("startedat >= ?", options.MinStartedAt)) + } + if !options.MaxStartedAt.IsZero() { + clauses = append(clauses, db.Raw("startedat <= ?", options.MaxStartedAt)) + } + for _, r := range options.LabelRequirements { + q, err := requirementToCondition(t, r, tableName, labelTableName, false) + if err != nil { + return "", nil, err + } + clauses = append(clauses, q) + } + out = in + outArgs = inArgs + for _, c := range clauses { + if c == nil || c.Empty() { + continue + } + out += " and " + c.Raw() + outArgs = append(outArgs, c.Arguments()...) + } + if count { + return out, outArgs, nil + } + if options.StartedAtAscending { + out += " order by startedat asc" + } else { + out += " order by startedat desc" + } + + // If we were passed 0 as the limit, then we should load all available archived workflows + // to match the behavior of the `List` operations in the Kubernetes API + if options.Limit == 0 { + options.Limit = -1 + options.Offset = -1 + } + out += " limit ?" + outArgs = append(outArgs, options.Limit) + out += " offset ?" 
+ outArgs = append(outArgs, options.Offset) + return out, outArgs, nil +} diff --git a/persist/sqldb/workflow_archive.go b/persist/sqldb/workflow_archive.go index fce2ff97b432..55d4800cfe89 100644 --- a/persist/sqldb/workflow_archive.go +++ b/persist/sqldb/workflow_archive.go @@ -9,7 +9,6 @@ import ( "github.com/upper/db/v4" "google.golang.org/grpc/codes" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" @@ -59,8 +58,8 @@ type archivedWorkflowCount struct { type WorkflowArchive interface { ArchiveWorkflow(wf *wfv1.Workflow) error // list workflows, with the most recently started workflows at the beginning (i.e. index 0 is the most recent) - ListWorkflows(namespace string, name string, namePrefix string, minStartAt, maxStartAt time.Time, labelRequirements labels.Requirements, limit, offset int) (wfv1.Workflows, error) - CountWorkflows(namespace string, name string, namePrefix string, minStartAt, maxStartAt time.Time, labelRequirements labels.Requirements) (int64, error) + ListWorkflows(options sutils.ListOptions) (wfv1.Workflows, error) + CountWorkflows(options sutils.ListOptions) (int64, error) GetWorkflow(uid string, namespace string, name string) (*wfv1.Workflow, error) DeleteWorkflow(uid string) error DeleteExpiredWorkflows(ttl time.Duration) error @@ -146,16 +145,9 @@ func (r *workflowArchive) ArchiveWorkflow(wf *wfv1.Workflow) error { }) } -func (r *workflowArchive) ListWorkflows(namespace string, name string, namePrefix string, minStartedAt, maxStartedAt time.Time, labelRequirements labels.Requirements, limit int, offset int) (wfv1.Workflows, error) { +func (r *workflowArchive) ListWorkflows(options sutils.ListOptions) (wfv1.Workflows, error) { var archivedWfs []archivedWorkflowMetadata - // If we were passed 0 as the limit, then we should load all available archived workflows - // to match the behavior of the `List` operations in 
the Kubernetes API - if limit == 0 { - limit = -1 - offset = -1 - } - selectQuery, err := selectArchivedWorkflowQuery(r.dbType) if err != nil { return nil, err @@ -164,22 +156,14 @@ func (r *workflowArchive) ListWorkflows(namespace string, name string, namePrefi selector := r.session.SQL(). Select(selectQuery). From(archiveTableName). - Where(r.clusterManagedNamespaceAndInstanceID()). - And(namespaceEqual(namespace)). - And(nameEqual(name)). - And(namePrefixClause(namePrefix)). - And(startedAtFromClause(minStartedAt)). - And(startedAtToClause(maxStartedAt)) + Where(r.clusterManagedNamespaceAndInstanceID()) - selector, err = labelsClause(selector, r.dbType, labelRequirements) + selector, err = BuildArchivedWorkflowSelector(selector, archiveTableName, archiveLabelsTableName, r.dbType, options, false) if err != nil { return nil, err } - err = selector. - OrderBy("-startedat"). - Limit(limit). - Offset(offset). - All(&archivedWfs) + + err = selector.All(&archivedWfs) if err != nil { return nil, err } @@ -218,20 +202,15 @@ func (r *workflowArchive) ListWorkflows(namespace string, name string, namePrefi return wfs, nil } -func (r *workflowArchive) CountWorkflows(namespace string, name string, namePrefix string, minStartedAt, maxStartedAt time.Time, labelRequirements labels.Requirements) (int64, error) { +func (r *workflowArchive) CountWorkflows(options sutils.ListOptions) (int64, error) { total := &archivedWorkflowCount{} selector := r.session.SQL(). Select(db.Raw("count(*) as total")). From(archiveTableName). - Where(r.clusterManagedNamespaceAndInstanceID()). - And(namespaceEqual(namespace)). - And(nameEqual(name)). - And(namePrefixClause(namePrefix)). - And(startedAtFromClause(minStartedAt)). 
- And(startedAtToClause(maxStartedAt)) + Where(r.clusterManagedNamespaceAndInstanceID()) - selector, err := labelsClause(selector, r.dbType, labelRequirements) + selector, err := BuildArchivedWorkflowSelector(selector, archiveTableName, archiveLabelsTableName, r.dbType, options, true) if err != nil { return 0, err } @@ -253,40 +232,37 @@ func (r *workflowArchive) clusterManagedNamespaceAndInstanceID() *db.AndExpr { func startedAtFromClause(from time.Time) db.Cond { if !from.IsZero() { - return db.Cond{"startedat > ": from} + return db.Cond{"startedat >=": from} } return db.Cond{} } func startedAtToClause(to time.Time) db.Cond { if !to.IsZero() { - return db.Cond{"startedat < ": to} + return db.Cond{"startedat <=": to} } return db.Cond{} } func namespaceEqual(namespace string) db.Cond { - if namespace == "" { - return db.Cond{} - } else { + if namespace != "" { return db.Cond{"namespace": namespace} } + return db.Cond{} } func nameEqual(name string) db.Cond { - if name == "" { - return db.Cond{} - } else { + if name != "" { return db.Cond{"name": name} } + return db.Cond{} } func namePrefixClause(namePrefix string) db.Cond { - if namePrefix == "" { - return db.Cond{} - } else { - return db.Cond{"name LIKE ": namePrefix + "%"} + if namePrefix != "" { + return db.Cond{"name LIKE": namePrefix + "%"} } + return db.Cond{} } func (r *workflowArchive) GetWorkflow(uid string, namespace string, name string) (*wfv1.Workflow, error) { diff --git a/pkg/apiclient/argo-kube-client.go b/pkg/apiclient/argo-kube-client.go index 0f3ea042619b..3ae83fe809db 100644 --- a/pkg/apiclient/argo-kube-client.go +++ b/pkg/apiclient/argo-kube-client.go @@ -11,6 +11,8 @@ import ( restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" + "github.com/argoproj/argo-workflows/v3/server/workflow/store" + "github.com/argoproj/argo-workflows/v3" "github.com/argoproj/argo-workflows/v3/persist/sqldb" "github.com/argoproj/argo-workflows/v3/pkg/apiclient/clusterworkflowtemplate" @@ -25,7 +27,6 
@@ import ( cronworkflowserver "github.com/argoproj/argo-workflows/v3/server/cronworkflow" "github.com/argoproj/argo-workflows/v3/server/types" workflowserver "github.com/argoproj/argo-workflows/v3/server/workflow" - "github.com/argoproj/argo-workflows/v3/server/workflowarchive" workflowtemplateserver "github.com/argoproj/argo-workflows/v3/server/workflowtemplate" "github.com/argoproj/argo-workflows/v3/util/help" "github.com/argoproj/argo-workflows/v3/util/instanceid" @@ -38,6 +39,7 @@ var ( type argoKubeClient struct { instanceIDService instanceid.Service + wfClient workflow.Interface } var _ Client = &argoKubeClient{} @@ -84,13 +86,13 @@ func newArgoKubeClient(ctx context.Context, clientConfig clientcmd.ClientConfig, if err != nil { return nil, nil, err } - return ctx, &argoKubeClient{instanceIDService}, nil + return ctx, &argoKubeClient{instanceIDService, wfClient}, nil } func (a *argoKubeClient) NewWorkflowServiceClient() workflowpkg.WorkflowServiceClient { wfArchive := sqldb.NullWorkflowArchive - wfaServer := workflowarchive.NewWorkflowArchiveServer(wfArchive) - return &errorTranslatingWorkflowServiceClient{&argoKubeWorkflowServiceClient{workflowserver.NewWorkflowServer(a.instanceIDService, argoKubeOffloadNodeStatusRepo, wfaServer)}} + wfLister := store.NewKubeLister(a.wfClient) + return &errorTranslatingWorkflowServiceClient{&argoKubeWorkflowServiceClient{workflowserver.NewWorkflowServer(a.instanceIDService, argoKubeOffloadNodeStatusRepo, wfArchive, a.wfClient, wfLister, nil)}} } func (a *argoKubeClient) NewCronWorkflowServiceClient() (cronworkflow.CronWorkflowServiceClient, error) { diff --git a/pkg/apis/workflow/v1alpha1/container_set_template_types.go b/pkg/apis/workflow/v1alpha1/container_set_template_types.go index fb685a3e81f5..ac1a4f44205f 100644 --- a/pkg/apis/workflow/v1alpha1/container_set_template_types.go +++ b/pkg/apis/workflow/v1alpha1/container_set_template_types.go @@ -12,16 +12,18 @@ import ( type ContainerSetTemplate struct { Containers 
[]ContainerNode `json:"containers" protobuf:"bytes,4,rep,name=containers"` VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty" protobuf:"bytes,3,rep,name=volumeMounts"` - // RetryStrategy describes how to retry a container nodes in the container set if it fails. - // Nbr of retries(default 0) and sleep duration between retries(default 0s, instant retry) can be set. + // RetryStrategy describes how to retry container nodes if the container set fails. + // Note that this works differently from the template-level `retryStrategy` as it is a process-level retry that does not create new Pods or containers. RetryStrategy *ContainerSetRetryStrategy `json:"retryStrategy,omitempty" protobuf:"bytes,5,opt,name=retryStrategy"` } +// ContainerSetRetryStrategy provides controls on how to retry a container set type ContainerSetRetryStrategy struct { // Duration is the time between each retry, examples values are "300ms", "1s" or "5m". // Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". Duration string `json:"duration,omitempty" protobuf:"bytes,1,opt,name=duration"` - // Nbr of retries + // Retries is the maximum number of retry attempts for each container. It does not include the + // first, original attempt; the maximum number of total attempts will be `retries + 1`. Retries *intstr.IntOrString `json:"retries" protobuf:"bytes,2,rep,name=retries"` } diff --git a/pkg/apis/workflow/v1alpha1/generated.proto b/pkg/apis/workflow/v1alpha1/generated.proto index 1abdbf4732aa..9bfd4967eeeb 100644 --- a/pkg/apis/workflow/v1alpha1/generated.proto +++ b/pkg/apis/workflow/v1alpha1/generated.proto @@ -409,12 +409,14 @@ message ContainerNode { repeated string dependencies = 2; } +// ContainerSetRetryStrategy provides controls on how to retry a container set message ContainerSetRetryStrategy { // Duration is the time between each retry, examples values are "300ms", "1s" or "5m". // Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". 
optional string duration = 1; - // Nbr of retries + // Retries is the maximum number of retry attempts for each container. It does not include the + // first, original attempt; the maximum number of total attempts will be `retries + 1`. optional k8s.io.apimachinery.pkg.util.intstr.IntOrString retries = 2; } @@ -423,8 +425,8 @@ message ContainerSetTemplate { repeated k8s.io.api.core.v1.VolumeMount volumeMounts = 3; - // RetryStrategy describes how to retry a container nodes in the container set if it fails. - // Nbr of retries(default 0) and sleep duration between retries(default 0s, instant retry) can be set. + // RetryStrategy describes how to retry container nodes if the container set fails. + // Note that this works differently from the template-level `retryStrategy` as it is a process-level retry that does not create new Pods or containers. optional ContainerSetRetryStrategy retryStrategy = 5; } diff --git a/pkg/apis/workflow/v1alpha1/openapi_generated.go b/pkg/apis/workflow/v1alpha1/openapi_generated.go index c947e1420988..95f9fb022c79 100644 --- a/pkg/apis/workflow/v1alpha1/openapi_generated.go +++ b/pkg/apis/workflow/v1alpha1/openapi_generated.go @@ -1950,7 +1950,8 @@ func schema_pkg_apis_workflow_v1alpha1_ContainerSetRetryStrategy(ref common.Refe return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, + Description: "ContainerSetRetryStrategy provides controls on how to retry a container set", + Type: []string{"object"}, Properties: map[string]spec.Schema{ "duration": { SchemaProps: spec.SchemaProps{ @@ -1961,7 +1962,7 @@ func schema_pkg_apis_workflow_v1alpha1_ContainerSetRetryStrategy(ref common.Refe }, "retries": { SchemaProps: spec.SchemaProps{ - Description: "Nbr of retries", + Description: "Retries is the maximum number of retry attempts for each container. 
It does not include the first, original attempt; the maximum number of total attempts will be `retries + 1`.", Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"), }, }, @@ -2008,7 +2009,7 @@ func schema_pkg_apis_workflow_v1alpha1_ContainerSetTemplate(ref common.Reference }, "retryStrategy": { SchemaProps: spec.SchemaProps{ - Description: "RetryStrategy describes how to retry a container nodes in the container set if it fails. Nbr of retries(default 0) and sleep duration between retries(default 0s, instant retry) can be set.", + Description: "RetryStrategy describes how to retry container nodes if the container set fails. Note that this works differently from the template-level `retryStrategy` as it is a process-level retry that does not create new Pods or containers.", Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ContainerSetRetryStrategy"), }, }, diff --git a/pkg/plugins/executor/swagger.yml b/pkg/plugins/executor/swagger.yml index 33aedaa36c99..c2d884228983 100644 --- a/pkg/plugins/executor/swagger.yml +++ b/pkg/plugins/executor/swagger.yml @@ -1002,6 +1002,8 @@ definitions: title: ContainerPort represents a network port in a single container. 
type: object ContainerSetRetryStrategy: + description: ContainerSetRetryStrategy provides controls on how to retry a container + set properties: duration: description: |- diff --git a/sdks/java/client/docs/IoArgoprojWorkflowV1alpha1ContainerSetRetryStrategy.md b/sdks/java/client/docs/IoArgoprojWorkflowV1alpha1ContainerSetRetryStrategy.md index 19c11c6bb471..6a059f3b0315 100644 --- a/sdks/java/client/docs/IoArgoprojWorkflowV1alpha1ContainerSetRetryStrategy.md +++ b/sdks/java/client/docs/IoArgoprojWorkflowV1alpha1ContainerSetRetryStrategy.md @@ -2,6 +2,7 @@ # IoArgoprojWorkflowV1alpha1ContainerSetRetryStrategy +ContainerSetRetryStrategy provides controls on how to retry a container set ## Properties diff --git a/sdks/python/client/docs/IoArgoprojWorkflowV1alpha1ContainerSetRetryStrategy.md b/sdks/python/client/docs/IoArgoprojWorkflowV1alpha1ContainerSetRetryStrategy.md index 50e09bc46bd8..b7bfad4cd414 100644 --- a/sdks/python/client/docs/IoArgoprojWorkflowV1alpha1ContainerSetRetryStrategy.md +++ b/sdks/python/client/docs/IoArgoprojWorkflowV1alpha1ContainerSetRetryStrategy.md @@ -1,5 +1,6 @@ # IoArgoprojWorkflowV1alpha1ContainerSetRetryStrategy +ContainerSetRetryStrategy provides controls on how to retry a container set ## Properties Name | Type | Description | Notes diff --git a/server/apiserver/argoserver.go b/server/apiserver/argoserver.go index 65fcd64313e1..7a40b263b932 100644 --- a/server/apiserver/argoserver.go +++ b/server/apiserver/argoserver.go @@ -54,6 +54,7 @@ import ( "github.com/argoproj/argo-workflows/v3/server/static" "github.com/argoproj/argo-workflows/v3/server/types" "github.com/argoproj/argo-workflows/v3/server/workflow" + "github.com/argoproj/argo-workflows/v3/server/workflow/store" "github.com/argoproj/argo-workflows/v3/server/workflowarchive" "github.com/argoproj/argo-workflows/v3/server/workflowtemplate" grpcutil "github.com/argoproj/argo-workflows/v3/util/grpc" @@ -229,7 +230,13 @@ func (as *argoServer) Run(ctx context.Context, port int, 
browserOpenFunc func(st artifactRepositories := artifactrepositories.New(as.clients.Kubernetes, as.managedNamespace, &config.ArtifactRepository) artifactServer := artifacts.NewArtifactServer(as.gatekeeper, hydrator.New(offloadRepo), wfArchive, instanceIDService, artifactRepositories) eventServer := event.NewController(instanceIDService, eventRecorderManager, as.eventQueueSize, as.eventWorkerCount, as.eventAsyncDispatch) - grpcServer := as.newGRPCServer(instanceIDService, offloadRepo, wfArchive, eventServer, config.Links, config.Columns, config.NavColor) + wfArchiveServer := workflowarchive.NewWorkflowArchiveServer(wfArchive, offloadRepo) + wfStore, err := store.NewSQLiteStore(instanceIDService) + if err != nil { + log.Fatal(err) + } + workflowServer := workflow.NewWorkflowServer(instanceIDService, offloadRepo, wfArchive, as.clients.Workflow, wfStore, wfStore) + grpcServer := as.newGRPCServer(instanceIDService, workflowServer, wfArchiveServer, eventServer, config.Links, config.Columns, config.NavColor) httpServer := as.newHTTPServer(ctx, port, artifactServer) // Start listener @@ -259,6 +266,7 @@ func (as *argoServer) Run(ctx context.Context, port int, browserOpenFunc func(st grpcL := tcpm.Match(cmux.Any()) go eventServer.Run(as.stopCh) + go workflowServer.Run(as.stopCh) go func() { as.checkServeErr("grpcServer", grpcServer.Serve(grpcL)) }() go func() { as.checkServeErr("httpServer", httpServer.Serve(httpL)) }() go func() { as.checkServeErr("tcpm", tcpm.Serve()) }() @@ -275,7 +283,7 @@ func (as *argoServer) Run(ctx context.Context, port int, browserOpenFunc func(st <-as.stopCh } -func (as *argoServer) newGRPCServer(instanceIDService instanceid.Service, offloadNodeStatusRepo sqldb.OffloadNodeStatusRepo, wfArchive sqldb.WorkflowArchive, eventServer *event.Controller, links []*v1alpha1.Link, columns []*v1alpha1.Column, navColor string) *grpc.Server { +func (as *argoServer) newGRPCServer(instanceIDService instanceid.Service, workflowServer 
workflowpkg.WorkflowServiceServer, wfArchiveServer workflowarchivepkg.ArchivedWorkflowServiceServer, eventServer *event.Controller, links []*v1alpha1.Link, columns []*v1alpha1.Column, navColor string) *grpc.Server { serverLog := log.NewEntry(log.StandardLogger()) // "Prometheus histograms are a great way to measure latency distributions of your RPCs. However, since it is bad practice to have metrics of high cardinality the latency monitoring metrics are disabled by default. To enable them please call the following in your server initialization code:" @@ -307,12 +315,11 @@ func (as *argoServer) newGRPCServer(instanceIDService instanceid.Service, offloa } grpcServer := grpc.NewServer(sOpts...) - wfArchiveServer := workflowarchive.NewWorkflowArchiveServer(wfArchive) infopkg.RegisterInfoServiceServer(grpcServer, info.NewInfoServer(as.managedNamespace, links, columns, navColor)) eventpkg.RegisterEventServiceServer(grpcServer, eventServer) eventsourcepkg.RegisterEventSourceServiceServer(grpcServer, eventsource.NewEventSourceServer()) sensorpkg.RegisterSensorServiceServer(grpcServer, sensor.NewSensorServer()) - workflowpkg.RegisterWorkflowServiceServer(grpcServer, workflow.NewWorkflowServer(instanceIDService, offloadNodeStatusRepo, wfArchiveServer)) + workflowpkg.RegisterWorkflowServiceServer(grpcServer, workflowServer) workflowtemplatepkg.RegisterWorkflowTemplateServiceServer(grpcServer, workflowtemplate.NewWorkflowTemplateServer(instanceIDService)) cronworkflowpkg.RegisterCronWorkflowServiceServer(grpcServer, cronworkflow.NewCronWorkflowServer(instanceIDService)) workflowarchivepkg.RegisterArchivedWorkflowServiceServer(grpcServer, wfArchiveServer) diff --git a/server/auth/sso/sso.go b/server/auth/sso/sso.go index 2d238bba0ab5..743990d9f517 100644 --- a/server/auth/sso/sso.go +++ b/server/auth/sso/sso.go @@ -288,7 +288,7 @@ func (s *sso) HandleCallback(w http.ResponseWriter, r *http.Request) { // Some SSO implementations (Okta) require a call to // the OIDC user info 
path to get attributes like groups if s.userInfoPath != "" { - groups, err = c.GetUserInfoGroups(oauth2Token.AccessToken, s.issuer, s.userInfoPath) + groups, err = c.GetUserInfoGroups(s.httpClient, oauth2Token.AccessToken, s.issuer, s.userInfoPath) if err != nil { log.WithError(err).Errorf("failed to get groups claim from the given userInfoPath(%s)", s.userInfoPath) w.WriteHeader(401) diff --git a/server/auth/types/claims.go b/server/auth/types/claims.go index 677eef8d31be..ecab3d54637f 100644 --- a/server/auth/types/claims.go +++ b/server/auth/types/claims.go @@ -85,7 +85,7 @@ func (c *Claims) GetCustomGroup(customKeyName string) ([]string, error) { return newSlice, nil } -func (c *Claims) GetUserInfoGroups(accessToken, issuer, userInfoPath string) ([]string, error) { +func (c *Claims) GetUserInfoGroups(httpClient HttpClient, accessToken, issuer, userInfoPath string) ([]string, error) { url := fmt.Sprintf("%s%s", issuer, userInfoPath) request, err := http.NewRequest("GET", url, nil) diff --git a/server/auth/types/claims_test.go b/server/auth/types/claims_test.go index 1b87e8a4c336..8fa5fa4baef1 100644 --- a/server/auth/types/claims_test.go +++ b/server/auth/types/claims_test.go @@ -243,7 +243,7 @@ func TestGetUserInfoGroups(t *testing.T) { httpClient = &HttpClientMock{StatusCode: 200, Body: body} claims := &Claims{} - groups, err := claims.GetUserInfoGroups("Bearer fake", "https://fake.okta.com", "/user-info") + groups, err := claims.GetUserInfoGroups(httpClient, "Bearer fake", "https://fake.okta.com", "/user-info") assert.Equal(t, groups, []string{"Everyone"}) assert.Equal(t, nil, err) }) diff --git a/server/utils/list_options.go b/server/utils/list_options.go new file mode 100644 index 000000000000..69a03456cbd5 --- /dev/null +++ b/server/utils/list_options.go @@ -0,0 +1,133 @@ +package utils + +import ( + "fmt" + "strconv" + "strings" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + 
"k8s.io/apimachinery/pkg/labels" +) + +type ListOptions struct { + Namespace, Name, NamePrefix string + MinStartedAt, MaxStartedAt time.Time + LabelRequirements labels.Requirements + Limit, Offset int + ShowRemainingItemCount bool + StartedAtAscending bool +} + +func (l ListOptions) WithLimit(limit int) ListOptions { + l.Limit = limit + return l +} + +func (l ListOptions) WithOffset(offset int) ListOptions { + l.Offset = offset + return l +} + +func (l ListOptions) WithShowRemainingItemCount(showRemainingItemCount bool) ListOptions { + l.ShowRemainingItemCount = showRemainingItemCount + return l +} + +func (l ListOptions) WithMaxStartedAt(maxStartedAt time.Time) ListOptions { + l.MaxStartedAt = maxStartedAt + return l +} + +func (l ListOptions) WithMinStartedAt(minStartedAt time.Time) ListOptions { + l.MinStartedAt = minStartedAt + return l +} + +func (l ListOptions) WithStartedAtAscending(ascending bool) ListOptions { + l.StartedAtAscending = ascending + return l +} + +func BuildListOptions(options metav1.ListOptions, ns, namePrefix string) (ListOptions, error) { + if options.Continue == "" { + options.Continue = "0" + } + limit := int(options.Limit) + + offset, err := strconv.Atoi(options.Continue) + if err != nil { + // no need to use sutils here + return ListOptions{}, status.Error(codes.InvalidArgument, "listOptions.continue must be int") + } + if offset < 0 { + // no need to use sutils here + return ListOptions{}, status.Error(codes.InvalidArgument, "listOptions.continue must >= 0") + } + + // namespace is now specified as its own query parameter + // note that for backward compatibility, the field selector 'metadata.namespace' is also supported for now + namespace := ns // optional + name := "" + minStartedAt := time.Time{} + maxStartedAt := time.Time{} + showRemainingItemCount := false + for _, selector := range strings.Split(options.FieldSelector, ",") { + if len(selector) == 0 { + continue + } + if strings.HasPrefix(selector, "metadata.namespace=") { + // 
for backward compatibility, the field selector 'metadata.namespace' is supported for now despite the addition + // of the new 'namespace' query parameter, which is what the UI uses + fieldSelectedNamespace := strings.TrimPrefix(selector, "metadata.namespace=") + switch namespace { + case "": + namespace = fieldSelectedNamespace + case fieldSelectedNamespace: + break + default: + return ListOptions{}, status.Errorf(codes.InvalidArgument, + "'namespace' query param (%q) and fieldselector 'metadata.namespace' (%q) are both specified and contradict each other", namespace, fieldSelectedNamespace) + } + } else if strings.HasPrefix(selector, "metadata.name=") { + name = strings.TrimPrefix(selector, "metadata.name=") + } else if strings.HasPrefix(selector, "spec.startedAt>") { + minStartedAt, err = time.Parse(time.RFC3339, strings.TrimPrefix(selector, "spec.startedAt>")) + if err != nil { + // startedAt is populated by us, it should therefore be valid. + return ListOptions{}, ToStatusError(err, codes.Internal) + } + } else if strings.HasPrefix(selector, "spec.startedAt<") { + maxStartedAt, err = time.Parse(time.RFC3339, strings.TrimPrefix(selector, "spec.startedAt<")) + if err != nil { + // no need to use sutils here + return ListOptions{}, ToStatusError(err, codes.Internal) + } + } else if strings.HasPrefix(selector, "ext.showRemainingItemCount") { + showRemainingItemCount, err = strconv.ParseBool(strings.TrimPrefix(selector, "ext.showRemainingItemCount=")) + if err != nil { + // populated by us, it should therefore be valid. 
+ return ListOptions{}, ToStatusError(err, codes.Internal) + } + } else { + return ListOptions{}, ToStatusError(fmt.Errorf("unsupported requirement %s", selector), codes.InvalidArgument) + } + } + requirements, err := labels.ParseToRequirements(options.LabelSelector) + if err != nil { + return ListOptions{}, ToStatusError(err, codes.InvalidArgument) + } + return ListOptions{ + Namespace: namespace, + Name: name, + NamePrefix: namePrefix, + MinStartedAt: minStartedAt, + MaxStartedAt: maxStartedAt, + LabelRequirements: requirements, + Limit: limit, + Offset: offset, + ShowRemainingItemCount: showRemainingItemCount, + }, nil +} diff --git a/server/workflow/store/lister.go b/server/workflow/store/lister.go new file mode 100644 index 000000000000..3a2dc0870a7a --- /dev/null +++ b/server/workflow/store/lister.go @@ -0,0 +1,41 @@ +package store + +import ( + "context" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" + "github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned" +) + +type WorkflowLister interface { + ListWorkflows(ctx context.Context, namespace, namePrefix string, listOptions metav1.ListOptions) (*wfv1.WorkflowList, error) + CountWorkflows(ctx context.Context, namespace, namePrefix string, listOptions metav1.ListOptions) (int64, error) +} + +type kubeLister struct { + wfClient versioned.Interface +} + +var _ WorkflowLister = &kubeLister{} + +func NewKubeLister(wfClient versioned.Interface) WorkflowLister { + return &kubeLister{wfClient: wfClient} +} + +func (k *kubeLister) ListWorkflows(ctx context.Context, namespace, namePrefix string, listOptions metav1.ListOptions) (*wfv1.WorkflowList, error) { + wfList, err := k.wfClient.ArgoprojV1alpha1().Workflows(namespace).List(ctx, listOptions) + if err != nil { + return nil, err + } + return wfList, nil +} + +func (k *kubeLister) CountWorkflows(ctx context.Context, namespace, namePrefix string, listOptions metav1.ListOptions) 
(int64, error) { + wfList, err := k.wfClient.ArgoprojV1alpha1().Workflows(namespace).List(ctx, listOptions) + if err != nil { + return 0, err + } + return int64(len(wfList.Items)), nil +} diff --git a/server/workflow/store/sqlite_store.go b/server/workflow/store/sqlite_store.go new file mode 100644 index 000000000000..c3518a8f5e16 --- /dev/null +++ b/server/workflow/store/sqlite_store.go @@ -0,0 +1,318 @@ +package store + +import ( + "context" + "encoding/json" + "fmt" + + log "github.com/sirupsen/logrus" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/cache" + "zombiezen.com/go/sqlite" + "zombiezen.com/go/sqlite/sqlitex" + + sutils "github.com/argoproj/argo-workflows/v3/server/utils" + + "github.com/argoproj/argo-workflows/v3/persist/sqldb" + wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" + "github.com/argoproj/argo-workflows/v3/util/instanceid" + "github.com/argoproj/argo-workflows/v3/workflow/common" +) + +const ( + workflowTableName = "argo_workflows" + workflowLabelsTableName = "argo_workflows_labels" + tableInitializationQuery = `create table if not exists argo_workflows ( + uid varchar(128) not null, + instanceid varchar(64), + name varchar(256), + namespace varchar(256), + phase varchar(25), + startedat timestamp, + finishedat timestamp, + workflow text, + primary key (uid) +); +create index if not exists idx_instanceid on argo_workflows (instanceid); +create table if not exists argo_workflows_labels ( + uid varchar(128) not null, + name varchar(317) not null, + value varchar(63) not null, + primary key (uid, name, value), + foreign key (uid) references argo_workflows (uid) on delete cascade +); +create index if not exists idx_name_value on argo_workflows_labels (name, value); +` + insertWorkflowQuery = `insert into argo_workflows (uid, instanceid, name, namespace, phase, startedat, finishedat, workflow) values (?, ?, ?, ?, ?, ?, ?, ?)` + insertWorkflowLabelQuery = `insert into argo_workflows_labels (uid, 
name, value) values (?, ?, ?)` + deleteWorkflowQuery = `delete from argo_workflows where uid = ?` +) + +func initDB() (*sqlite.Conn, error) { + conn, err := sqlite.OpenConn(":memory:", sqlite.OpenReadWrite) + if err != nil { + return nil, err + } + err = sqlitex.ExecuteTransient(conn, "pragma foreign_keys = on", nil) + if err != nil { + return nil, fmt.Errorf("failed to enable foreign key support: %w", err) + } + + err = sqlitex.ExecuteScript(conn, tableInitializationQuery, nil) + if err != nil { + return nil, err + } + return conn, nil +} + +type WorkflowStore interface { + cache.Store +} + +// SQLiteStore is a sqlite-based store. +type SQLiteStore struct { + conn *sqlite.Conn + instanceService instanceid.Service +} + +var _ WorkflowStore = &SQLiteStore{} +var _ WorkflowLister = &SQLiteStore{} + +func NewSQLiteStore(instanceService instanceid.Service) (*SQLiteStore, error) { + conn, err := initDB() + if err != nil { + return nil, err + } + return &SQLiteStore{conn: conn, instanceService: instanceService}, nil +} + +func (s *SQLiteStore) ListWorkflows(ctx context.Context, namespace, namePrefix string, listOptions metav1.ListOptions) (*wfv1.WorkflowList, error) { + options, err := sutils.BuildListOptions(listOptions, namespace, namePrefix) + if err != nil { + return nil, err + } + query := `select workflow from argo_workflows +where instanceid = ? 
+` + args := []any{s.instanceService.InstanceID()} + + query, args, err = sqldb.BuildWorkflowSelector(query, args, workflowTableName, workflowLabelsTableName, sqldb.SQLite, options, false) + if err != nil { + return nil, err + } + + var workflows = wfv1.Workflows{} + err = sqlitex.Execute(s.conn, query, &sqlitex.ExecOptions{ + Args: args, + ResultFunc: func(stmt *sqlite.Stmt) error { + wf := stmt.ColumnText(0) + w := wfv1.Workflow{} + err := json.Unmarshal([]byte(wf), &w) + if err != nil { + log.WithFields(log.Fields{"workflow": wf}).Errorln("unable to unmarshal workflow from database") + } else { + workflows = append(workflows, w) + } + return nil + }, + }) + if err != nil { + return nil, err + } + + return &wfv1.WorkflowList{ + Items: workflows, + }, nil +} + +func (s *SQLiteStore) CountWorkflows(ctx context.Context, namespace, namePrefix string, listOptions metav1.ListOptions) (int64, error) { + options, err := sutils.BuildListOptions(listOptions, namespace, namePrefix) + if err != nil { + return 0, err + } + query := `select count(*) as total from argo_workflows +where instanceid = ? +` + args := []any{s.instanceService.InstanceID()} + + options.Limit = 0 + options.Offset = 0 + query, args, err = sqldb.BuildWorkflowSelector(query, args, workflowTableName, workflowLabelsTableName, sqldb.SQLite, options, true) + if err != nil { + return 0, err + } + + var total int64 + err = sqlitex.Execute(s.conn, query, &sqlitex.ExecOptions{ + Args: args, + ResultFunc: func(stmt *sqlite.Stmt) error { + total = stmt.ColumnInt64(0) + return nil + }, + }) + if err != nil { + return 0, err + } + return total, nil +} + +func (s *SQLiteStore) Add(obj interface{}) error { + wf, ok := obj.(*wfv1.Workflow) + if !ok { + return fmt.Errorf("unable to convert object to Workflow. 
object: %v", obj) + } + done := sqlitex.Transaction(s.conn) + err := s.upsertWorkflow(wf) + defer done(&err) + return err +} + +func (s *SQLiteStore) Update(obj interface{}) error { + wf, ok := obj.(*wfv1.Workflow) + if !ok { + return fmt.Errorf("unable to convert object to Workflow. object: %v", obj) + } + done := sqlitex.Transaction(s.conn) + err := s.upsertWorkflow(wf) + defer done(&err) + return err +} + +func (s *SQLiteStore) Delete(obj interface{}) error { + wf, ok := obj.(*wfv1.Workflow) + if !ok { + return fmt.Errorf("unable to convert object to Workflow. object: %v", obj) + } + return sqlitex.Execute(s.conn, deleteWorkflowQuery, &sqlitex.ExecOptions{Args: []any{string(wf.UID)}}) +} + +func (s *SQLiteStore) Replace(list []interface{}, resourceVersion string) error { + wfs := make([]*wfv1.Workflow, 0, len(list)) + for _, obj := range list { + wf, ok := obj.(*wfv1.Workflow) + if !ok { + return fmt.Errorf("unable to convert object to Workflow. object: %v", obj) + } + wfs = append(wfs, wf) + } + done := sqlitex.Transaction(s.conn) + err := s.replaceWorkflows(wfs) + defer done(&err) + return err +} + +func (s *SQLiteStore) Resync() error { + return nil +} + +func (s *SQLiteStore) List() []interface{} { + panic("not implemented") +} + +func (s *SQLiteStore) ListKeys() []string { + panic("not implemented") +} + +func (s *SQLiteStore) Get(obj interface{}) (item interface{}, exists bool, err error) { + panic("not implemented") +} + +func (s *SQLiteStore) GetByKey(key string) (item interface{}, exists bool, err error) { + panic("not implemented") +} + +func (s *SQLiteStore) upsertWorkflow(wf *wfv1.Workflow) error { + err := sqlitex.Execute(s.conn, deleteWorkflowQuery, &sqlitex.ExecOptions{Args: []any{string(wf.UID)}}) + if err != nil { + return err + } + // if workflow is archived, we don't need to store it in the sqlite store, we get it from the archive store instead + if wf.GetLabels()[common.LabelKeyWorkflowArchivingStatus] == "Archived" { + return nil + } + 
workflow, err := json.Marshal(wf) + if err != nil { + return err + } + err = sqlitex.Execute(s.conn, insertWorkflowQuery, + &sqlitex.ExecOptions{ + Args: []any{string(wf.UID), s.instanceService.InstanceID(), wf.Name, wf.Namespace, wf.Status.Phase, wf.Status.StartedAt.Time, wf.Status.FinishedAt.Time, string(workflow)}, + }, + ) + if err != nil { + return err + } + stmt, err := s.conn.Prepare(insertWorkflowLabelQuery) + if err != nil { + return err + } + for key, value := range wf.GetLabels() { + if err = stmt.Reset(); err != nil { + return err + } + stmt.BindText(1, string(wf.UID)) + stmt.BindText(2, key) + stmt.BindText(3, value) + if _, err = stmt.Step(); err != nil { + return err + } + } + return nil +} + +func (s *SQLiteStore) replaceWorkflows(workflows []*wfv1.Workflow) error { + err := sqlitex.Execute(s.conn, `delete from argo_workflows`, nil) + if err != nil { + return err + } + wfs := make([]*wfv1.Workflow, 0, len(workflows)) + for _, wf := range workflows { + // if workflow is archived, we don't need to store it in the sqlite store, we get it from the archive store instead + if wf.GetLabels()[common.LabelKeyWorkflowArchivingStatus] != "Archived" { + wfs = append(wfs, wf) + } + } + // add all workflows to argo_workflows table + stmt, err := s.conn.Prepare(insertWorkflowQuery) + if err != nil { + return err + } + for _, wf := range wfs { + if err = stmt.Reset(); err != nil { + return err + } + stmt.BindText(1, string(wf.UID)) + stmt.BindText(2, s.instanceService.InstanceID()) + stmt.BindText(3, wf.Name) + stmt.BindText(4, wf.Namespace) + stmt.BindText(5, string(wf.Status.Phase)) + stmt.BindText(6, wf.Status.StartedAt.String()) + stmt.BindText(7, wf.Status.FinishedAt.String()) + workflow, err := json.Marshal(wf) + if err != nil { + return err + } + stmt.BindText(8, string(workflow)) + if _, err = stmt.Step(); err != nil { + return err + } + } + stmt, err = s.conn.Prepare(insertWorkflowLabelQuery) + if err != nil { + return err + } + for _, wf := range wfs { + 
for key, val := range wf.GetLabels() { + if err = stmt.Reset(); err != nil { + return err + } + stmt.BindText(1, string(wf.UID)) + stmt.BindText(2, key) + stmt.BindText(3, val) + if _, err = stmt.Step(); err != nil { + return err + } + } + } + return nil +} diff --git a/server/workflow/store/sqlite_store_test.go b/server/workflow/store/sqlite_store_test.go new file mode 100644 index 000000000000..086c755014a7 --- /dev/null +++ b/server/workflow/store/sqlite_store_test.go @@ -0,0 +1,155 @@ +package store + +import ( + "context" + "encoding/json" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "zombiezen.com/go/sqlite" + "zombiezen.com/go/sqlite/sqlitex" + + wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" + "github.com/argoproj/argo-workflows/v3/util/instanceid" +) + +func TestInitDB(t *testing.T) { + conn, err := initDB() + assert.NoError(t, err) + defer conn.Close() + t.Run("TestTablesCreated", func(t *testing.T) { + err = sqlitex.Execute(conn, `select name from sqlite_master where type='table'`, &sqlitex.ExecOptions{ + ResultFunc: func(stmt *sqlite.Stmt) error { + name := stmt.ColumnText(0) + assert.Contains(t, []string{workflowTableName, workflowLabelsTableName}, name) + return nil + }, + }) + require.NoError(t, err) + }) + t.Run("TestForeignKeysEnabled", func(t *testing.T) { + err = sqlitex.Execute(conn, `pragma foreign_keys`, &sqlitex.ExecOptions{ + ResultFunc: func(stmt *sqlite.Stmt) error { + assert.Equal(t, "1", stmt.ColumnText(0)) + return nil + }, + }) + require.NoError(t, err) + }) + t.Run("TestIndexesCreated", func(t *testing.T) { + var indexes []string + err = sqlitex.Execute(conn, `select name from sqlite_master where type='index'`, &sqlitex.ExecOptions{ + ResultFunc: func(stmt *sqlite.Stmt) error { + name := stmt.ColumnText(0) + indexes = append(indexes, name) + return nil + }, + }) + 
require.NoError(t, err) + assert.Contains(t, indexes, "idx_instanceid") + assert.Contains(t, indexes, "idx_name_value") + }) + t.Run("TestForeignKeysAdded", func(t *testing.T) { + err = sqlitex.Execute(conn, `pragma foreign_key_list('argo_workflows_labels')`, &sqlitex.ExecOptions{ + ResultFunc: func(stmt *sqlite.Stmt) error { + assert.Equal(t, "argo_workflows", stmt.ColumnText(2)) + assert.Equal(t, "uid", stmt.ColumnText(3)) + assert.Equal(t, "uid", stmt.ColumnText(4)) + assert.Equal(t, "CASCADE", stmt.ColumnText(6)) + return nil + }, + }) + require.NoError(t, err) + }) +} + +func TestStoreOperation(t *testing.T) { + instanceIdSvc := instanceid.NewService("my-instanceid") + conn, err := initDB() + require.NoError(t, err) + store := SQLiteStore{ + conn: conn, + instanceService: instanceIdSvc, + } + t.Run("TestAddWorkflow", func(t *testing.T) { + for i := 0; i < 10; i++ { + require.NoError(t, store.Add(generateWorkflow(i))) + } + num, err := store.CountWorkflows(context.Background(), "argo", "", metav1.ListOptions{}) + require.NoError(t, err) + assert.Equal(t, int64(10), num) + // Labels are also added + require.NoError(t, sqlitex.Execute(conn, `select count(*) from argo_workflows_labels`, &sqlitex.ExecOptions{ + ResultFunc: func(stmt *sqlite.Stmt) error { + assert.Equal(t, 10*4, stmt.ColumnInt(0)) + return nil + }, + })) + }) + t.Run("TestUpdateWorkflow", func(t *testing.T) { + wf := generateWorkflow(0) + wf.Labels["test-label-2"] = "value-2" + require.NoError(t, store.Update(wf)) + // workflow is updated + require.NoError(t, sqlitex.Execute(conn, `select workflow from argo_workflows where uid = 'uid-0'`, &sqlitex.ExecOptions{ + ResultFunc: func(stmt *sqlite.Stmt) error { + w := stmt.ColumnText(0) + require.NoError(t, json.Unmarshal([]byte(w), &wf)) + assert.Len(t, wf.Labels, 5) + return nil + }, + })) + require.NoError(t, sqlitex.Execute(conn, `select count(*) from argo_workflows_labels where name = 'test-label-2' and value = 'value-2'`, &sqlitex.ExecOptions{ + 
ResultFunc: func(stmt *sqlite.Stmt) error { + assert.Equal(t, 1, stmt.ColumnInt(0)) + return nil + }, + })) + }) + t.Run("TestDeleteWorkflow", func(t *testing.T) { + wf := generateWorkflow(0) + require.NoError(t, store.Delete(wf)) + // workflow is deleted + require.NoError(t, sqlitex.Execute(conn, `select count(*) from argo_workflows where uid = 'uid-0'`, &sqlitex.ExecOptions{ + ResultFunc: func(stmt *sqlite.Stmt) error { + assert.Equal(t, 0, stmt.ColumnInt(0)) + return nil + }, + })) + // labels are also deleted + require.NoError(t, sqlitex.Execute(conn, `select count(*) from argo_workflows_labels where uid = 'uid-0'`, &sqlitex.ExecOptions{ + ResultFunc: func(stmt *sqlite.Stmt) error { + assert.Equal(t, 0, stmt.ColumnInt(0)) + return nil + }, + })) + }) + t.Run("TestListWorkflows", func(t *testing.T) { + wfList, err := store.ListWorkflows(context.Background(), "argo", "", metav1.ListOptions{Limit: 5}) + require.NoError(t, err) + assert.Len(t, wfList.Items, 5) + }) + t.Run("TestCountWorkflows", func(t *testing.T) { + num, err := store.CountWorkflows(context.Background(), "argo", "", metav1.ListOptions{}) + require.NoError(t, err) + assert.Equal(t, int64(9), num) + }) +} + +func generateWorkflow(uid int) *wfv1.Workflow { + return &wfv1.Workflow{ObjectMeta: metav1.ObjectMeta{ + UID: types.UID(fmt.Sprintf("uid-%d", uid)), + Name: fmt.Sprintf("workflow-%d", uid), + Namespace: "argo", + Labels: map[string]string{ + "workflows.argoproj.io/completed": "true", + "workflows.argoproj.io/phase": "Succeeded", + "workflows.argoproj.io/controller-instanceid": "my-instanceid", + "test-label": fmt.Sprintf("label-%d", uid), + }, + }} +} diff --git a/server/workflow/workflow_server.go b/server/workflow/workflow_server.go index 16fcc3b8c7d3..d4f7fda5b981 100644 --- a/server/workflow/workflow_server.go +++ b/server/workflow/workflow_server.go @@ -6,24 +6,29 @@ import ( "fmt" "io" "sort" + "time" log "github.com/sirupsen/logrus" "google.golang.org/grpc/codes" 
"google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" corev1 "k8s.io/api/core/v1" apierr "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/tools/cache" "github.com/argoproj/argo-workflows/v3/errors" "github.com/argoproj/argo-workflows/v3/persist/sqldb" workflowpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflow" - workflowarchivepkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflowarchive" "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow" wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" "github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned" "github.com/argoproj/argo-workflows/v3/server/auth" sutils "github.com/argoproj/argo-workflows/v3/server/utils" + "github.com/argoproj/argo-workflows/v3/server/workflow/store" argoutil "github.com/argoproj/argo-workflows/v3/util" "github.com/argoproj/argo-workflows/v3/util/fields" "github.com/argoproj/argo-workflows/v3/util/instanceid" @@ -36,18 +41,50 @@ import ( "github.com/argoproj/argo-workflows/v3/workflow/validate" ) +const ( + latestAlias = "@latest" + reSyncDuration = 20 * time.Minute +) + type workflowServer struct { instanceIDService instanceid.Service offloadNodeStatusRepo sqldb.OffloadNodeStatusRepo hydrator hydrator.Interface - wfArchiveServer workflowarchivepkg.ArchivedWorkflowServiceServer + wfArchive sqldb.WorkflowArchive + wfLister store.WorkflowLister + wfReflector *cache.Reflector } -const latestAlias = "@latest" +var _ workflowpkg.WorkflowServiceServer = &workflowServer{} + +// NewWorkflowServer returns a new WorkflowServer +func NewWorkflowServer(instanceIDService instanceid.Service, offloadNodeStatusRepo sqldb.OffloadNodeStatusRepo, wfArchive sqldb.WorkflowArchive, wfClientSet versioned.Interface, wfLister store.WorkflowLister, wfStore store.WorkflowStore) 
*workflowServer { + ws := &workflowServer{ + instanceIDService: instanceIDService, + offloadNodeStatusRepo: offloadNodeStatusRepo, + hydrator: hydrator.New(offloadNodeStatusRepo), + wfArchive: wfArchive, + wfLister: wfLister, + } + if wfStore != nil { + lw := &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + return wfClientSet.ArgoprojV1alpha1().Workflows(metav1.NamespaceAll).List(context.Background(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return wfClientSet.ArgoprojV1alpha1().Workflows(metav1.NamespaceAll).Watch(context.Background(), options) + }, + } + wfReflector := cache.NewReflector(lw, &wfv1.Workflow{}, wfStore, reSyncDuration) + ws.wfReflector = wfReflector + } + return ws +} -// NewWorkflowServer returns a new workflowServer -func NewWorkflowServer(instanceIDService instanceid.Service, offloadNodeStatusRepo sqldb.OffloadNodeStatusRepo, wfArchiveServer workflowarchivepkg.ArchivedWorkflowServiceServer) workflowpkg.WorkflowServiceServer { - return &workflowServer{instanceIDService, offloadNodeStatusRepo, hydrator.New(offloadNodeStatusRepo), wfArchiveServer} +func (s *workflowServer) Run(stopCh <-chan struct{}) { + if s.wfReflector != nil { + s.wfReflector.Run(stopCh) + } } func (s *workflowServer) CreateWorkflow(ctx context.Context, req *workflowpkg.WorkflowCreateRequest) (*wfv1.Workflow, error) { @@ -128,65 +165,75 @@ func (s *workflowServer) GetWorkflow(ctx context.Context, req *workflowpkg.Workf return wf, nil } -func mergeWithArchivedWorkflows(liveWfs wfv1.WorkflowList, archivedWfs wfv1.WorkflowList, numWfsToKeep int) *wfv1.WorkflowList { - var mergedWfs []wfv1.Workflow - var uidToWfs = map[types.UID][]wfv1.Workflow{} - for _, item := range liveWfs.Items { - uidToWfs[item.UID] = append(uidToWfs[item.UID], item) - } - for _, item := range archivedWfs.Items { - uidToWfs[item.UID] = append(uidToWfs[item.UID], item) +func (s *workflowServer) ListWorkflows(ctx 
context.Context, req *workflowpkg.WorkflowListRequest) (*wfv1.WorkflowList, error) { + listOption := metav1.ListOptions{} + if req.ListOptions != nil { + listOption = *req.ListOptions } + s.instanceIDService.With(&listOption) - for _, v := range uidToWfs { - // The archived workflow we saved in the database have "Persisted" as the archival status. - // Prioritize 'Archived' over 'Persisted' because 'Archived' means the workflow is in the cluster - if len(v) == 1 { - mergedWfs = append(mergedWfs, v[0]) - } else { - if ok := v[0].Labels[common.LabelKeyWorkflowArchivingStatus] == "Archived"; ok { - mergedWfs = append(mergedWfs, v[0]) - } else { - mergedWfs = append(mergedWfs, v[1]) - } - } + options, err := sutils.BuildListOptions(listOption, req.Namespace, "") + if err != nil { + return nil, err } - mergedWfsList := wfv1.WorkflowList{Items: mergedWfs, ListMeta: liveWfs.ListMeta} - sort.Sort(mergedWfsList.Items) - numWfs := 0 - var finalWfs []wfv1.Workflow - for _, item := range mergedWfsList.Items { - if numWfsToKeep == 0 || numWfs < numWfsToKeep { - finalWfs = append(finalWfs, item) - numWfs += 1 - } + // verify if we have permission to list Workflows + allowed, err := auth.CanI(ctx, "list", workflow.WorkflowPlural, options.Namespace, "") + if err != nil { + return nil, sutils.ToStatusError(err, codes.Internal) } - return &wfv1.WorkflowList{Items: finalWfs, ListMeta: liveWfs.ListMeta} -} - -func (s *workflowServer) ListWorkflows(ctx context.Context, req *workflowpkg.WorkflowListRequest) (*wfv1.WorkflowList, error) { - wfClient := auth.GetWfClient(ctx) - - listOption := &metav1.ListOptions{} - if req.ListOptions != nil { - listOption = req.ListOptions + if !allowed { + return nil, status.Error(codes.PermissionDenied, fmt.Sprintf("Permission denied, you are not allowed to list workflows in namespace \"%s\". 
Maybe you want to specify a namespace with query parameter `.namespace=%s`?", options.Namespace, options.Namespace)) } - s.instanceIDService.With(listOption) - wfList, err := wfClient.ArgoprojV1alpha1().Workflows(req.Namespace).List(ctx, *listOption) + + var wfs wfv1.Workflows + liveWfCount, err := s.wfLister.CountWorkflows(ctx, req.Namespace, "", listOption) if err != nil { return nil, sutils.ToStatusError(err, codes.Internal) } - archivedWfList, err := s.wfArchiveServer.ListArchivedWorkflows(ctx, &workflowarchivepkg.ListArchivedWorkflowsRequest{ - ListOptions: listOption, - NamePrefix: "", - Namespace: req.Namespace, - }) + archivedCount, err := s.wfArchive.CountWorkflows(options) if err != nil { - log.Warnf("unable to list archived workflows:%v", err) - } else { - if archivedWfList != nil { - wfList = mergeWithArchivedWorkflows(*wfList, *archivedWfList, int(listOption.Limit)) + return nil, sutils.ToStatusError(err, codes.Internal) + } + totalCount := liveWfCount + archivedCount + + // first fetch live workflows + liveWfList := &wfv1.WorkflowList{} + if liveWfCount > 0 && (options.Limit == 0 || options.Offset < int(liveWfCount)) { + liveWfList, err = s.wfLister.ListWorkflows(ctx, req.Namespace, "", listOption) + if err != nil { + return nil, sutils.ToStatusError(err, codes.Internal) + } + wfs = append(wfs, liveWfList.Items...) + } + + // then fetch archived workflows + if options.Limit == 0 || + int64(options.Offset+options.Limit) > liveWfCount { + archivedOffset := options.Offset - int(liveWfCount) + archivedLimit := options.Limit + if archivedOffset < 0 { + archivedOffset = 0 + archivedLimit = options.Limit - len(liveWfList.Items) + } + archivedWfList, err := s.wfArchive.ListWorkflows(options.WithLimit(archivedLimit).WithOffset(archivedOffset)) + if err != nil { + return nil, sutils.ToStatusError(err, codes.Internal) } + wfs = append(wfs, archivedWfList...) 
+ } + meta := metav1.ListMeta{ResourceVersion: liveWfList.ResourceVersion} + if s.wfReflector != nil { + meta.ResourceVersion = s.wfReflector.LastSyncResourceVersion() + } + remainCount := totalCount - int64(options.Offset) - int64(len(wfs)) + if remainCount < 0 { + remainCount = 0 + } + if remainCount > 0 { + meta.Continue = fmt.Sprintf("%v", options.Offset+len(wfs)) + } + if options.ShowRemainingItemCount { + meta.RemainingItemCount = &remainCount } cleaner := fields.NewCleaner(req.Fields) @@ -195,10 +242,10 @@ func (s *workflowServer) ListWorkflows(ctx context.Context, req *workflowpkg.Wor if err != nil { return nil, sutils.ToStatusError(err, codes.Internal) } - for i, wf := range wfList.Items { + for i, wf := range wfs { if wf.Status.IsOffloadNodeStatus() { if s.offloadNodeStatusRepo.IsEnabled() { - wfList.Items[i].Status.Nodes = offloadedNodes[sqldb.UUIDVersion{UID: string(wf.UID), Version: wf.GetOffloadNodeStatusVersion()}] + wfs[i].Status.Nodes = offloadedNodes[sqldb.UUIDVersion{UID: string(wf.UID), Version: wf.GetOffloadNodeStatusVersion()}] } else { log.WithFields(log.Fields{"namespace": wf.Namespace, "name": wf.Name}).Warn(sqldb.OffloadNodeStatusDisabled) } @@ -207,9 +254,9 @@ func (s *workflowServer) ListWorkflows(ctx context.Context, req *workflowpkg.Wor } // we make no promises about the overall list sorting, we just sort each page - sort.Sort(wfList.Items) + sort.Sort(wfs) - res := &wfv1.WorkflowList{ListMeta: metav1.ListMeta{Continue: wfList.Continue, ResourceVersion: wfList.ResourceVersion}, Items: wfList.Items} + res := &wfv1.WorkflowList{ListMeta: meta, Items: wfs} newRes := &wfv1.WorkflowList{} if ok, err := cleaner.Clean(res, &newRes); err != nil { return nil, sutils.ToStatusError(fmt.Errorf("unable to CleanFields in request: %w", err), codes.Internal) @@ -642,15 +689,15 @@ func (s *workflowServer) getWorkflow(ctx context.Context, wfClient versioned.Int var err error wf, origErr := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(ctx, name, 
options) if wf == nil || origErr != nil { - wf, err = s.wfArchiveServer.GetArchivedWorkflow(ctx, &workflowarchivepkg.GetArchivedWorkflowRequest{ - Namespace: namespace, - Name: name, - }) + wf, err = s.wfArchive.GetWorkflow("", namespace, name) if err != nil { log.Errorf("failed to get live workflow: %v; failed to get archived workflow: %v", origErr, err) // We only return the original error to preserve the original status code. return nil, sutils.ToStatusError(origErr, codes.Internal) } + if wf == nil { + return nil, status.Error(codes.NotFound, "not found") + } } return wf, nil } diff --git a/server/workflow/workflow_server_test.go b/server/workflow/workflow_server_test.go index 73a68bb29fe1..b00afd269985 100644 --- a/server/workflow/workflow_server_test.go +++ b/server/workflow/workflow_server_test.go @@ -5,13 +5,14 @@ import ( "fmt" "testing" - "time" "github.com/go-jose/go-jose/v3/jwt" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" + authorizationv1 "k8s.io/api/authorization/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/rand" "k8s.io/client-go/kubernetes/fake" @@ -25,7 +26,8 @@ import ( v1alpha "github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/fake" "github.com/argoproj/argo-workflows/v3/server/auth" "github.com/argoproj/argo-workflows/v3/server/auth/types" - "github.com/argoproj/argo-workflows/v3/server/workflowarchive" + sutils "github.com/argoproj/argo-workflows/v3/server/utils" + "github.com/argoproj/argo-workflows/v3/server/workflow/store" "github.com/argoproj/argo-workflows/v3/util" "github.com/argoproj/argo-workflows/v3/util/instanceid" "github.com/argoproj/argo-workflows/v3/workflow/common" @@ -136,7 +138,7 @@ const wf2 = ` "namespace": "workflows", "resourceVersion": "52919656", "selfLink": "/apis/argoproj.io/v1alpha1/namespaces/workflows/workflows/hello-world-b6h5m", - "uid": 
"91066a6c-1ddc-11ea-b443-42010aa80075" + "uid": "91066a6c-1ddc-11ea-b443-42010aa80074" }, "spec": { @@ -199,7 +201,7 @@ const wf3 = ` "namespace": "test", "resourceVersion": "53020772", "selfLink": "/apis/argoproj.io/v1alpha1/namespaces/workflows/workflows/hello-world-9tql2", - "uid": "6522aff1-1e01-11ea-b443-42010aa80075" + "uid": "6522aff1-1e01-11ea-b443-42010aa80074" }, "spec": { @@ -325,7 +327,7 @@ const wf5 = ` "namespace": "workflows", "resourceVersion": "53020772", "selfLink": "/apis/argoproj.io/v1alpha1/namespaces/workflows/workflows/hello-world-9tql2", - "uid": "6522aff1-1e01-11ea-b443-42010aa80075" + "uid": "6522aff1-1e01-11ea-b443-42010aa80073" }, "spec": { @@ -574,7 +576,6 @@ func getWorkflowServer() (workflowpkg.WorkflowServiceServer, context.Context) { v1alpha1.MustUnmarshal(unlabelled, &unlabelledObj) v1alpha1.MustUnmarshal(wf1, &wfObj1) - v1alpha1.MustUnmarshal(wf1, &wfObj1) v1alpha1.MustUnmarshal(wf2, &wfObj2) v1alpha1.MustUnmarshal(wf3, &wfObj3) v1alpha1.MustUnmarshal(wf4, &wfObj4) @@ -590,7 +591,6 @@ func getWorkflowServer() (workflowpkg.WorkflowServiceServer, context.Context) { archivedRepo := &mocks.WorkflowArchive{} - wfaServer := workflowarchive.NewWorkflowArchiveServer(archivedRepo) archivedRepo.On("GetWorkflow", "", "test", "hello-world-9tql2-test").Return(&v1alpha1.Workflow{ ObjectMeta: metav1.ObjectMeta{Name: "hello-world-9tql2-test", Namespace: "test"}, Spec: v1alpha1.WorkflowSpec{ @@ -604,11 +604,41 @@ func getWorkflowServer() (workflowpkg.WorkflowServiceServer, context.Context) { archivedRepo.On("GetWorkflow", "", "test", "unlabelled").Return(nil, nil) archivedRepo.On("GetWorkflow", "", "workflows", "latest").Return(nil, nil) archivedRepo.On("GetWorkflow", "", "workflows", "hello-world-9tql2-not").Return(nil, nil) - server := NewWorkflowServer(instanceid.NewService("my-instanceid"), offloadNodeStatusRepo, wfaServer) + r, err := labels.ParseToRequirements("workflows.argoproj.io/controller-instanceid=my-instanceid") + if err != nil { + 
panic(err) + } + archivedRepo.On("CountWorkflows", sutils.ListOptions{Namespace: "workflows", LabelRequirements: r}).Return(int64(2), nil) + archivedRepo.On("ListWorkflows", sutils.ListOptions{Namespace: "workflows", Limit: -2, LabelRequirements: r}).Return(v1alpha1.Workflows{wfObj2, failedWfObj}, nil) + archivedRepo.On("CountWorkflows", sutils.ListOptions{Namespace: "test", LabelRequirements: r}).Return(int64(1), nil) + archivedRepo.On("ListWorkflows", sutils.ListOptions{Namespace: "test", Limit: -1, LabelRequirements: r}).Return(v1alpha1.Workflows{wfObj4}, nil) + kubeClientSet := fake.NewSimpleClientset() + kubeClientSet.PrependReactor("create", "selfsubjectaccessreviews", func(action ktesting.Action) (handled bool, ret runtime.Object, err error) { + return true, &authorizationv1.SelfSubjectAccessReview{ + Status: authorizationv1.SubjectAccessReviewStatus{Allowed: true}, + }, nil + }) wfClientset := v1alpha.NewSimpleClientset(&unlabelledObj, &wfObj1, &wfObj2, &wfObj3, &wfObj4, &wfObj5, &failedWfObj, &wftmpl, &cronwfObj, &cwfTmpl) wfClientset.PrependReactor("create", "workflows", generateNameReactor) ctx := context.WithValue(context.WithValue(context.WithValue(context.TODO(), auth.WfKey, wfClientset), auth.KubeKey, kubeClientSet), auth.ClaimsKey, &types.Claims{Claims: jwt.Claims{Subject: "my-sub"}}) + listOptions := &metav1.ListOptions{} + instanceIdSvc := instanceid.NewService("my-instanceid") + instanceIdSvc.With(listOptions) + wfStore, err := store.NewSQLiteStore(instanceIdSvc) + if err != nil { + panic(err) + } + if err = wfStore.Add(&wfObj1); err != nil { + panic(err) + } + if err = wfStore.Add(&wfObj3); err != nil { + panic(err) + } + if err = wfStore.Add(&wfObj5); err != nil { + panic(err) + } + server := NewWorkflowServer(instanceIdSvc, offloadNodeStatusRepo, archivedRepo, wfClientset, wfStore, wfStore) return server, ctx } @@ -650,26 +680,6 @@ func (t testWatchWorkflowServer) Send(*workflowpkg.WorkflowWatchEvent) error { panic("implement me") } -func 
TestMergeWithArchivedWorkflows(t *testing.T) { - timeNow := time.Now() - wf1Live := v1alpha1.Workflow{ - ObjectMeta: metav1.ObjectMeta{UID: "1", CreationTimestamp: metav1.Time{Time: timeNow.Add(time.Second)}, - Labels: map[string]string{common.LabelKeyWorkflowArchivingStatus: "Archived"}}} - wf1Archived := v1alpha1.Workflow{ - ObjectMeta: metav1.ObjectMeta{UID: "1", CreationTimestamp: metav1.Time{Time: timeNow.Add(time.Second)}, - Labels: map[string]string{common.LabelKeyWorkflowArchivingStatus: "Persisted"}}} - wf2 := v1alpha1.Workflow{ - ObjectMeta: metav1.ObjectMeta{UID: "2", CreationTimestamp: metav1.Time{Time: timeNow.Add(2 * time.Second)}}} - wf3 := v1alpha1.Workflow{ - ObjectMeta: metav1.ObjectMeta{UID: "3", CreationTimestamp: metav1.Time{Time: timeNow.Add(3 * time.Second)}}} - liveWfList := v1alpha1.WorkflowList{Items: []v1alpha1.Workflow{wf1Live, wf2}} - archivedWfList := v1alpha1.WorkflowList{Items: []v1alpha1.Workflow{wf1Archived, wf3, wf2}} - expectedWfList := v1alpha1.WorkflowList{Items: []v1alpha1.Workflow{wf3, wf2, wf1Live}} - expectedShortWfList := v1alpha1.WorkflowList{Items: []v1alpha1.Workflow{wf3, wf2}} - assert.Equal(t, expectedWfList.Items, mergeWithArchivedWorkflows(liveWfList, archivedWfList, 0).Items) - assert.Equal(t, expectedShortWfList.Items, mergeWithArchivedWorkflows(liveWfList, archivedWfList, 2).Items) -} - func TestWatchWorkflows(t *testing.T) { server, ctx := getWorkflowServer() wf := &v1alpha1.Workflow{ diff --git a/server/workflowarchive/archived_workflow_server.go b/server/workflowarchive/archived_workflow_server.go index 511fe8d3ecc6..2f6c69369dce 100644 --- a/server/workflowarchive/archived_workflow_server.go +++ b/server/workflowarchive/archived_workflow_server.go @@ -6,9 +6,6 @@ import ( "os" "regexp" "sort" - "strconv" - "strings" - "time" log "github.com/sirupsen/logrus" "google.golang.org/grpc/codes" @@ -23,6 +20,7 @@ import ( "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow" wfv1 
"github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" "github.com/argoproj/argo-workflows/v3/server/auth" + "github.com/argoproj/argo-workflows/v3/workflow/hydrator" "github.com/argoproj/argo-workflows/v3/workflow/util" sutils "github.com/argoproj/argo-workflows/v3/server/utils" @@ -31,117 +29,53 @@ import ( const disableValueListRetrievalKeyPattern = "DISABLE_VALUE_LIST_RETRIEVAL_KEY_PATTERN" type archivedWorkflowServer struct { - wfArchive sqldb.WorkflowArchive + wfArchive sqldb.WorkflowArchive + offloadNodeStatusRepo sqldb.OffloadNodeStatusRepo + hydrator hydrator.Interface } // NewWorkflowArchiveServer returns a new archivedWorkflowServer -func NewWorkflowArchiveServer(wfArchive sqldb.WorkflowArchive) workflowarchivepkg.ArchivedWorkflowServiceServer { - return &archivedWorkflowServer{wfArchive: wfArchive} +func NewWorkflowArchiveServer(wfArchive sqldb.WorkflowArchive, offloadNodeStatusRepo sqldb.OffloadNodeStatusRepo) workflowarchivepkg.ArchivedWorkflowServiceServer { + return &archivedWorkflowServer{wfArchive, offloadNodeStatusRepo, hydrator.New(offloadNodeStatusRepo)} } func (w *archivedWorkflowServer) ListArchivedWorkflows(ctx context.Context, req *workflowarchivepkg.ListArchivedWorkflowsRequest) (*wfv1.WorkflowList, error) { - options := req.ListOptions - namePrefix := req.NamePrefix - if options == nil { - options = &metav1.ListOptions{} - } - if options.Continue == "" { - options.Continue = "0" - } - limit := int(options.Limit) - offset, err := strconv.Atoi(options.Continue) + options, err := sutils.BuildListOptions(*req.ListOptions, req.Namespace, req.NamePrefix) if err != nil { - // no need to use sutils here - return nil, status.Error(codes.InvalidArgument, "listOptions.continue must be int") - } - if offset < 0 { - // no need to use sutils here - return nil, status.Error(codes.InvalidArgument, "listOptions.continue must >= 0") - } - - // namespace is now specified as its own query parameter - // note that for backward compatibility, the 
field selector 'metadata.namespace' is also supported for now - namespace := req.Namespace // optional - name := "" - minStartedAt := time.Time{} - maxStartedAt := time.Time{} - showRemainingItemCount := false - for _, selector := range strings.Split(options.FieldSelector, ",") { - if len(selector) == 0 { - continue - } - if strings.HasPrefix(selector, "metadata.namespace=") { - // for backward compatibility, the field selector 'metadata.namespace' is supported for now despite the addition - // of the new 'namespace' query parameter, which is what the UI uses - fieldSelectedNamespace := strings.TrimPrefix(selector, "metadata.namespace=") - switch namespace { - case "": - namespace = fieldSelectedNamespace - case fieldSelectedNamespace: - break - default: - return nil, status.Errorf(codes.InvalidArgument, - "'namespace' query param (%q) and fieldselector 'metadata.namespace' (%q) are both specified and contradict each other", namespace, fieldSelectedNamespace) - } - } else if strings.HasPrefix(selector, "metadata.name=") { - name = strings.TrimPrefix(selector, "metadata.name=") - } else if strings.HasPrefix(selector, "spec.startedAt>") { - minStartedAt, err = time.Parse(time.RFC3339, strings.TrimPrefix(selector, "spec.startedAt>")) - if err != nil { - // startedAt is populated by us, it should therefore be valid. - return nil, sutils.ToStatusError(err, codes.Internal) - } - } else if strings.HasPrefix(selector, "spec.startedAt<") { - maxStartedAt, err = time.Parse(time.RFC3339, strings.TrimPrefix(selector, "spec.startedAt<")) - if err != nil { - // no need to use sutils here - return nil, sutils.ToStatusError(err, codes.Internal) - } - } else if strings.HasPrefix(selector, "ext.showRemainingItemCount") { - showRemainingItemCount, err = strconv.ParseBool(strings.TrimPrefix(selector, "ext.showRemainingItemCount=")) - if err != nil { - // populated by us, it should therefore be valid. 
- return nil, sutils.ToStatusError(err, codes.Internal) - } - } else { - return nil, sutils.ToStatusError(fmt.Errorf("unsupported requirement %s", selector), codes.InvalidArgument) - } - } - requirements, err := labels.ParseToRequirements(options.LabelSelector) - if err != nil { - return nil, sutils.ToStatusError(err, codes.InvalidArgument) + return nil, err } // verify if we have permission to list Workflows - allowed, err := auth.CanI(ctx, "list", workflow.WorkflowPlural, namespace, "") + allowed, err := auth.CanI(ctx, "list", workflow.WorkflowPlural, options.Namespace, "") if err != nil { return nil, sutils.ToStatusError(err, codes.Internal) } if !allowed { - return nil, status.Error(codes.PermissionDenied, fmt.Sprintf("Permission denied, you are not allowed to list workflows in namespace \"%s\". Maybe you want to specify a namespace with query parameter `.namespace=%s`?", namespace, namespace)) + return nil, status.Error(codes.PermissionDenied, fmt.Sprintf("Permission denied, you are not allowed to list workflows in namespace \"%s\". 
Maybe you want to specify a namespace with query parameter `.namespace=%s`?", options.Namespace, options.Namespace)) } + limit := options.Limit + offset := options.Offset // When the zero value is passed, we should treat this as returning all results // to align ourselves with the behavior of the `List` endpoints in the Kubernetes API loadAll := limit == 0 - limitWithMore := 0 if !loadAll { // Attempt to load 1 more record than we actually need as an easy way to determine whether or not more // records exist than we're currently requesting - limitWithMore = limit + 1 + options.Limit += 1 } - items, err := w.wfArchive.ListWorkflows(namespace, name, namePrefix, minStartedAt, maxStartedAt, requirements, limitWithMore, offset) + items, err := w.wfArchive.ListWorkflows(options) if err != nil { return nil, sutils.ToStatusError(err, codes.Internal) } meta := metav1.ListMeta{} - if showRemainingItemCount && !loadAll { - total, err := w.wfArchive.CountWorkflows(namespace, name, namePrefix, minStartedAt, maxStartedAt, requirements) + if options.ShowRemainingItemCount && !loadAll { + total, err := w.wfArchive.CountWorkflows(options) if err != nil { return nil, sutils.ToStatusError(err, codes.Internal) } @@ -282,6 +216,7 @@ func (w *archivedWorkflowServer) RetryArchivedWorkflow(ctx context.Context, req if err != nil { return nil, sutils.ToStatusError(err, codes.Internal) } + oriUid := wf.UID _, err = wfClient.ArgoprojV1alpha1().Workflows(req.Namespace).Get(ctx, wf.Name, metav1.GetOptions{}) if apierr.IsNotFound(err) { @@ -299,12 +234,30 @@ func (w *archivedWorkflowServer) RetryArchivedWorkflow(ctx context.Context, req } } + log.WithFields(log.Fields{"Dehydrate workflow uid=": wf.UID}).Info("RetryArchivedWorkflow") + // If the Workflow needs to be dehydrated in order to capture and retain all of the previous state for the subsequent workflow, then do so + err = w.hydrator.Dehydrate(wf) + if err != nil { + return nil, sutils.ToStatusError(err, codes.Internal) + } + 
wf.ObjectMeta.ResourceVersion = "" wf.ObjectMeta.UID = "" result, err := wfClient.ArgoprojV1alpha1().Workflows(req.Namespace).Create(ctx, wf, metav1.CreateOptions{}) if err != nil { return nil, sutils.ToStatusError(err, codes.Internal) } + // if the Workflow was dehydrated before, we need to capture and maintain its previous state for the new Workflow + if !w.hydrator.IsHydrated(wf) { + offloadedNodes, err := w.offloadNodeStatusRepo.Get(string(oriUid), wf.GetOffloadNodeStatusVersion()) + if err != nil { + return nil, sutils.ToStatusError(err, codes.Internal) + } + _, err = w.offloadNodeStatusRepo.Save(string(result.UID), wf.Namespace, offloadedNodes) + if err != nil { + return nil, sutils.ToStatusError(err, codes.Internal) + } + } return result, nil } diff --git a/server/workflowarchive/archived_workflow_server_test.go b/server/workflowarchive/archived_workflow_server_test.go index 7d3f9c89fac2..7f26bbbb20eb 100644 --- a/server/workflowarchive/archived_workflow_server_test.go +++ b/server/workflowarchive/archived_workflow_server_test.go @@ -7,21 +7,24 @@ import ( "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" authorizationv1 "k8s.io/api/authorization/v1" apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" kubefake "k8s.io/client-go/kubernetes/fake" k8stesting "k8s.io/client-go/testing" + "github.com/argoproj/argo-workflows/v3/persist/sqldb" "github.com/argoproj/argo-workflows/v3/persist/sqldb/mocks" workflowarchivepkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflowarchive" + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" argofake "github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/fake" "github.com/argoproj/argo-workflows/v3/server/auth" + sutils 
"github.com/argoproj/argo-workflows/v3/server/utils" "github.com/argoproj/argo-workflows/v3/workflow/common" ) @@ -29,7 +32,10 @@ func Test_archivedWorkflowServer(t *testing.T) { repo := &mocks.WorkflowArchive{} kubeClient := &kubefake.Clientset{} wfClient := &argofake.Clientset{} - w := NewWorkflowArchiveServer(repo) + offloadNodeStatusRepo := &mocks.OffloadNodeStatusRepo{} + offloadNodeStatusRepo.On("IsEnabled", mock.Anything).Return(true) + offloadNodeStatusRepo.On("List", mock.Anything).Return(map[sqldb.UUIDVersion]v1alpha1.Nodes{}, nil) + w := NewWorkflowArchiveServer(repo, offloadNodeStatusRepo) allowed := true kubeClient.AddReactor("create", "selfsubjectaccessreviews", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { return true, &authorizationv1.SelfSubjectAccessReview{ @@ -48,18 +54,20 @@ func Test_archivedWorkflowServer(t *testing.T) { }, nil }) // two pages of results for limit 1 - repo.On("ListWorkflows", "", "", "", time.Time{}, time.Time{}, labels.Requirements(nil), 2, 0).Return(wfv1.Workflows{{}, {}}, nil) - repo.On("ListWorkflows", "", "", "", time.Time{}, time.Time{}, labels.Requirements(nil), 2, 1).Return(wfv1.Workflows{{}}, nil) + repo.On("ListWorkflows", sutils.ListOptions{Limit: 2, Offset: 0}).Return(wfv1.Workflows{{}, {}}, nil) + repo.On("ListWorkflows", sutils.ListOptions{Limit: 2, Offset: 1}).Return(wfv1.Workflows{{}}, nil) minStartAt, _ := time.Parse(time.RFC3339, "2020-01-01T00:00:00Z") maxStartAt, _ := time.Parse(time.RFC3339, "2020-01-02T00:00:00Z") createdTime := metav1.Time{Time: time.Now().UTC()} finishedTime := metav1.Time{Time: createdTime.Add(time.Second * 2)} - repo.On("ListWorkflows", "", "", "", minStartAt, maxStartAt, labels.Requirements(nil), 2, 0).Return(wfv1.Workflows{{}}, nil) - repo.On("ListWorkflows", "", "my-name", "", minStartAt, maxStartAt, labels.Requirements(nil), 2, 0).Return(wfv1.Workflows{{}}, nil) - repo.On("ListWorkflows", "", "", "my-", minStartAt, maxStartAt, 
labels.Requirements(nil), 2, 0).Return(wfv1.Workflows{{}}, nil) - repo.On("ListWorkflows", "", "my-name", "my-", minStartAt, maxStartAt, labels.Requirements(nil), 2, 0).Return(wfv1.Workflows{{}}, nil) - repo.On("ListWorkflows", "user-ns", "", "", time.Time{}, time.Time{}, labels.Requirements(nil), 2, 0).Return(wfv1.Workflows{{}, {}}, nil) - repo.On("CountWorkflows", "", "my-name", "my-", minStartAt, maxStartAt, labels.Requirements(nil)).Return(int64(5), nil) + repo.On("ListWorkflows", sutils.ListOptions{Namespace: "", Name: "", NamePrefix: "", MinStartedAt: minStartAt, MaxStartedAt: maxStartAt, Limit: 2, Offset: 0}).Return(wfv1.Workflows{{}}, nil) + repo.On("ListWorkflows", sutils.ListOptions{Namespace: "", Name: "my-name", NamePrefix: "", MinStartedAt: minStartAt, MaxStartedAt: maxStartAt, Limit: 2, Offset: 0}).Return(wfv1.Workflows{{}}, nil) + repo.On("ListWorkflows", sutils.ListOptions{Namespace: "", Name: "", NamePrefix: "my-", MinStartedAt: minStartAt, MaxStartedAt: maxStartAt, Limit: 2, Offset: 0}).Return(wfv1.Workflows{{}}, nil) + repo.On("ListWorkflows", sutils.ListOptions{Namespace: "", Name: "my-name", NamePrefix: "my-", MinStartedAt: minStartAt, MaxStartedAt: maxStartAt, Limit: 2, Offset: 0}).Return(wfv1.Workflows{{}}, nil) + repo.On("ListWorkflows", sutils.ListOptions{Namespace: "", Name: "my-name", NamePrefix: "my-", MinStartedAt: minStartAt, MaxStartedAt: maxStartAt, Limit: 2, Offset: 0, ShowRemainingItemCount: true}).Return(wfv1.Workflows{{}}, nil) + repo.On("ListWorkflows", sutils.ListOptions{Namespace: "user-ns", Name: "", NamePrefix: "", MinStartedAt: time.Time{}, MaxStartedAt: time.Time{}, Limit: 2, Offset: 0}).Return(wfv1.Workflows{{}, {}}, nil) + repo.On("CountWorkflows", sutils.ListOptions{Namespace: "", Name: "my-name", NamePrefix: "my-", MinStartedAt: minStartAt, MaxStartedAt: maxStartAt, Limit: 2, Offset: 0}).Return(int64(5), nil) + repo.On("CountWorkflows", sutils.ListOptions{Namespace: "", Name: "my-name", NamePrefix: "my-", MinStartedAt: 
minStartAt, MaxStartedAt: maxStartAt, Limit: 2, Offset: 0, ShowRemainingItemCount: true}).Return(int64(5), nil) repo.On("GetWorkflow", "", "", "").Return(nil, nil) repo.On("GetWorkflow", "my-uid", "", "").Return(&wfv1.Workflow{ ObjectMeta: metav1.ObjectMeta{Name: "my-name"}, diff --git a/test/e2e/cli_test.go b/test/e2e/cli_test.go index 051e609b3ef4..8569392c96c8 100644 --- a/test/e2e/cli_test.go +++ b/test/e2e/cli_test.go @@ -924,7 +924,7 @@ func (s *CLISuite) TestRetryWorkflowWithContinueOn() { Then(). ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { workflowName = metadata.Name - assert.Equal(t, 6, len(status.Nodes)) + assert.Equal(t, 7, len(status.Nodes)) }). RunCli([]string{"retry", workflowName}, func(t *testing.T, output string, err error) { if assert.NoError(t, err, output) { @@ -940,10 +940,10 @@ func (s *CLISuite) TestRetryWorkflowWithContinueOn() { ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { workflowName = metadata.Name assert.Equal(t, wfv1.WorkflowFailed, status.Phase) - assert.Equal(t, 6, len(status.Nodes)) + assert.Equal(t, 7, len(status.Nodes)) }). ExpectWorkflowNode(func(status wfv1.NodeStatus) bool { - return strings.Contains(status.Name, "retry-workflow-with-continueon.success") + return strings.Contains(status.Name, ".success") }, func(t *testing.T, status *wfv1.NodeStatus, pod *corev1.Pod) { assert.Equal(t, 2, len(status.Children)) }) @@ -1166,6 +1166,64 @@ func (s *CLISuite) TestWorkflowResubmit() { }) } +func (s *CLISuite) TestWorkflowResubmitDAGWithDependencies() { + var wfString string + s.Given(). + Workflow("@testdata/resubmit-dag-with-dependencies.yaml"). + When(). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeFailed). + Then(). 
+ RunCli([]string{"resubmit", "--memoized", "@latest"}, func(t *testing.T, output string, err error) { + if assert.NoError(t, err) { + assert.Contains(t, output, "Name:") + assert.Contains(t, output, "Namespace:") + assert.Contains(t, output, "ServiceAccount:") + assert.Contains(t, output, "Status:") + assert.Contains(t, output, "Created:") + } + }). + RunCli([]string{"get", "@latest", "-o", "yaml"}, func(t *testing.T, output string, err error) { + if assert.NoError(t, err) { + wfString = output + } + }) + + s.Given(). + Workflow(wfString). + When(). + WaitForWorkflow(fixtures.ToBeCompleted). + Then(). + ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { + assert.Equal(t, wfv1.WorkflowFailed, status.Phase) + assert.Equal(t, 5, len(status.Nodes)) + }). + ExpectWorkflowNode(func(status wfv1.NodeStatus) bool { + return strings.Contains(status.Name, ".A") + }, func(t *testing.T, status *wfv1.NodeStatus, pod *corev1.Pod) { + assert.Equal(t, wfv1.NodeTypeSkipped, status.Type) + assert.Contains(t, status.Message, "original pod") + }). + ExpectWorkflowNode(func(status wfv1.NodeStatus) bool { + return strings.Contains(status.Name, ".B") + }, func(t *testing.T, status *wfv1.NodeStatus, pod *corev1.Pod) { + assert.Equal(t, wfv1.NodeFailed, status.Phase) + assert.Contains(t, status.Message, "exit code 1") + }). + ExpectWorkflowNode(func(status wfv1.NodeStatus) bool { + return strings.Contains(status.Name, ".C") + }, func(t *testing.T, status *wfv1.NodeStatus, pod *corev1.Pod) { + assert.Equal(t, wfv1.NodeTypeSkipped, status.Type) + assert.Contains(t, status.Message, "omitted: depends condition not met") + }). 
+ ExpectWorkflowNode(func(status wfv1.NodeStatus) bool { + return strings.Contains(status.Name, ".D") + }, func(t *testing.T, status *wfv1.NodeStatus, pod *corev1.Pod) { + assert.Equal(t, wfv1.NodeTypeSkipped, status.Type) + assert.Contains(t, status.Message, "omitted: depends condition not met") + }) +} + func (s *CLISuite) TestWorkflowResubmitByLabelSelector() { s.Given(). Workflow("@testdata/exit-1.yaml"). diff --git a/test/e2e/fixtures/e2e_suite.go b/test/e2e/fixtures/e2e_suite.go index f8b06aefa01b..6b415cbc408c 100644 --- a/test/e2e/fixtures/e2e_suite.go +++ b/test/e2e/fixtures/e2e_suite.go @@ -7,6 +7,7 @@ import ( "os" "time" + "github.com/argoproj/argo-workflows/v3/server/utils" "github.com/argoproj/argo-workflows/v3/util/secrets" "github.com/TwiN/go-color" @@ -155,7 +156,10 @@ func (s *E2ESuite) DeleteResources() { archive := s.Persistence.workflowArchive parse, err := labels.ParseToRequirements(Label) s.CheckError(err) - workflows, err := archive.ListWorkflows(Namespace, "", "", time.Time{}, time.Time{}, parse, 0, 0) + workflows, err := archive.ListWorkflows(utils.ListOptions{ + Namespace: Namespace, + LabelRequirements: parse, + }) s.CheckError(err) for _, w := range workflows { err := archive.DeleteWorkflow(string(w.UID)) diff --git a/test/e2e/fixtures/then.go b/test/e2e/fixtures/then.go index a742880ca0cf..adeb16c372ad 100644 --- a/test/e2e/fixtures/then.go +++ b/test/e2e/fixtures/then.go @@ -93,7 +93,7 @@ func (t *Then) ExpectWorkflowNode(selector func(status wfv1.NodeStatus) bool, f ObjectMeta: *metadata, } version := util.GetWorkflowPodNameVersion(wf) - podName := util.GeneratePodName(t.wf.Name, n.Name, n.TemplateName, n.ID, version) + podName := util.GeneratePodName(t.wf.Name, n.Name, util.GetTemplateFromNode(*n), n.ID, version) var err error ctx := context.Background() diff --git a/test/e2e/fixtures/when.go b/test/e2e/fixtures/when.go index 46cd1241bebe..b4a82dc5d3e8 100644 --- a/test/e2e/fixtures/when.go +++ b/test/e2e/fixtures/when.go @@ -210,6 
+210,11 @@ var ( return node.Type == wfv1.NodeTypePod && node.Phase == wfv1.NodeRunning }), "to have running pod" } + ToHaveFailedPod Condition = func(wf *wfv1.Workflow) (bool, string) { + return wf.Status.Nodes.Any(func(node wfv1.NodeStatus) bool { + return node.Type == wfv1.NodeTypePod && node.Phase == wfv1.NodeFailed + }), "to have failed pod" + } ) // `ToBeDone` replaces `ToFinish` which also makes sure the workflow is both complete not pending archiving. diff --git a/test/e2e/functional/json-argument.yaml b/test/e2e/functional/json-argument.yaml index 1b34757fb440..f6e534152983 100644 --- a/test/e2e/functional/json-argument.yaml +++ b/test/e2e/functional/json-argument.yaml @@ -20,6 +20,6 @@ spec: - name: json2 container: image: quay.io/codefresh/argoexec:latest + imagePullPolicy: Never command: [sh, -c] args: ["echo '{{inputs.parameters.json1}}' | jq . ; echo '{{inputs.parameters.json2}}' | jq ."] - diff --git a/test/e2e/functional/output-jqfilter-parameters.yaml b/test/e2e/functional/output-jqfilter-parameters.yaml index b24537e977d5..f3f9ce13fb38 100644 --- a/test/e2e/functional/output-jqfilter-parameters.yaml +++ b/test/e2e/functional/output-jqfilter-parameters.yaml @@ -53,6 +53,7 @@ spec: - name: value container: image: quay.io/codefresh/argoexec:latest + imagePullPolicy: Never command: [sh, -c] args: [" echo '<{{inputs.parameters.key}}>' > /tmp/input; diff --git a/test/e2e/retry_test.go b/test/e2e/retry_test.go index 740ef42d1967..fe007af2ace1 100644 --- a/test/e2e/retry_test.go +++ b/test/e2e/retry_test.go @@ -4,6 +4,9 @@ package e2e import ( + "context" + "io" + "strings" "testing" "time" @@ -120,6 +123,116 @@ spec: }) } +func (s *RetryTestSuite) TestWorkflowTemplateWithRetryStrategyInContainerSet() { + var name string + var ns string + s.Given(). + WorkflowTemplate("@testdata/workflow-template-with-containerset.yaml"). 
+ Workflow(` +metadata: + name: workflow-template-containerset +spec: + workflowTemplateRef: + name: containerset-with-retrystrategy +`). + When(). + CreateWorkflowTemplates(). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeFailed). + Then(). + ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { + assert.Equal(t, status.Phase, wfv1.WorkflowFailed) + }). + ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { + return status.Name == "workflow-template-containerset" + }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { + name = pod.GetName() + ns = pod.GetNamespace() + }) + // Success, no need retry + s.Run("ContainerLogs", func() { + ctx := context.Background() + podLogOptions := &apiv1.PodLogOptions{Container: "c1"} + stream, err := s.KubeClient.CoreV1().Pods(ns).GetLogs(name, podLogOptions).Stream(ctx) + assert.Nil(s.T(), err) + defer stream.Close() + logBytes, err := io.ReadAll(stream) + assert.Nil(s.T(), err) + output := string(logBytes) + count := strings.Count(output, "capturing logs") + assert.Equal(s.T(), 1, count) + assert.Contains(s.T(), output, "hi") + }) + // Command err. No retry logic is entered. + s.Run("ContainerLogs", func() { + ctx := context.Background() + podLogOptions := &apiv1.PodLogOptions{Container: "c2"} + stream, err := s.KubeClient.CoreV1().Pods(ns).GetLogs(name, podLogOptions).Stream(ctx) + assert.Nil(s.T(), err) + defer stream.Close() + logBytes, err := io.ReadAll(stream) + assert.Nil(s.T(), err) + output := string(logBytes) + count := strings.Count(output, "capturing logs") + assert.Equal(s.T(), 0, count) + assert.Contains(s.T(), output, "executable file not found in $PATH") + }) + // Retry when err. 
+ s.Run("ContainerLogs", func() { + ctx := context.Background() + podLogOptions := &apiv1.PodLogOptions{Container: "c3"} + stream, err := s.KubeClient.CoreV1().Pods(ns).GetLogs(name, podLogOptions).Stream(ctx) + assert.Nil(s.T(), err) + defer stream.Close() + logBytes, err := io.ReadAll(stream) + assert.Nil(s.T(), err) + output := string(logBytes) + count := strings.Count(output, "capturing logs") + assert.Equal(s.T(), 2, count) + countFailureInfo := strings.Count(output, "intentional failure") + assert.Equal(s.T(), 2, countFailureInfo) + }) +} + +func (s *RetryTestSuite) TestRetryNodeAntiAffinity() { + s.Given(). + Workflow(` +metadata: + name: test-nodeantiaffinity-strategy +spec: + entrypoint: main + templates: + - name: main + retryStrategy: + limit: '1' + retryPolicy: "Always" + affinity: + nodeAntiAffinity: {} + container: + name: main + image: 'argoproj/argosay:v2' + args: [ exit, "1" ] +`). + When(). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToHaveFailedPod). + Wait(5 * time.Second). + Then(). 
+ ExpectWorkflow(func(t *testing.T, _ *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { + if status.Phase == wfv1.WorkflowFailed { + nodeStatus := status.Nodes.FindByDisplayName("test-nodeantiaffinity-strategy(0)") + nodeStatusRetry := status.Nodes.FindByDisplayName("test-nodeantiaffinity-strategy(1)") + assert.NotEqual(t, nodeStatus.HostNodeName, nodeStatusRetry.HostNodeName) + } + if status.Phase == wfv1.WorkflowRunning { + nodeStatus := status.Nodes.FindByDisplayName("test-nodeantiaffinity-strategy(0)") + nodeStatusRetry := status.Nodes.FindByDisplayName("test-nodeantiaffinity-strategy(1)") + assert.Contains(t, nodeStatusRetry.Message, "1 node(s) didn't match Pod's node affinity/selector") + assert.NotEqual(t, nodeStatus.HostNodeName, nodeStatusRetry.HostNodeName) + } + }) +} + func TestRetrySuite(t *testing.T) { suite.Run(t, new(RetryTestSuite)) } diff --git a/test/e2e/signals_test.go b/test/e2e/signals_test.go index c7e92d115e0f..873c16014096 100644 --- a/test/e2e/signals_test.go +++ b/test/e2e/signals_test.go @@ -31,7 +31,7 @@ func (s *SignalsSuite) TestStopBehavior() { SubmitWorkflow(). WaitForWorkflow(fixtures.ToHaveRunningPod, killDuration). ShutdownWorkflow(wfv1.ShutdownStrategyStop). - WaitForWorkflow(killDuration + 10*time.Second). // this one takes especially long in CI + WaitForWorkflow(killDuration + 15*time.Second). // this one takes especially long in CI Then(). 
ExpectWorkflow(func(t *testing.T, m *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { assert.Contains(t, []wfv1.WorkflowPhase{wfv1.WorkflowFailed, wfv1.WorkflowError}, status.Phase) diff --git a/test/e2e/testdata/artifact-workflow-stopped.yaml b/test/e2e/testdata/artifact-workflow-stopped.yaml index 599b9823a135..ae85aa207a3f 100644 --- a/test/e2e/testdata/artifact-workflow-stopped.yaml +++ b/test/e2e/testdata/artifact-workflow-stopped.yaml @@ -53,8 +53,9 @@ spec: done - name: workflow-stopper container: - image: argoproj/argocli:latest - args: + image: quay.io/argoproj/argocli:latest + imagePullPolicy: Never + args: - stop - -l - workflows.argoproj.io/workflow=wf-stopped @@ -113,4 +114,4 @@ spec: name: my-minio-cred key: secretkey archive: - none: {} \ No newline at end of file + none: {} diff --git a/test/e2e/testdata/artifactgc/artgc-dag-wf-self-delete.yaml b/test/e2e/testdata/artifactgc/artgc-dag-wf-self-delete.yaml index 0b216acef631..ddce1199d629 100644 --- a/test/e2e/testdata/artifactgc/artgc-dag-wf-self-delete.yaml +++ b/test/e2e/testdata/artifactgc/artgc-dag-wf-self-delete.yaml @@ -59,8 +59,9 @@ spec: done - name: artgc-dag-workflow-deleter container: - image: argoproj/argocli:latest - args: + image: quay.io/argoproj/argocli:latest + imagePullPolicy: Never + args: - delete - -l - workflows.argoproj.io/workflow=artgc-dag-wf-self-delete @@ -96,4 +97,4 @@ spec: name: my-minio-cred key: secretkey artifactGC: - strategy: OnWorkflowDeletion \ No newline at end of file + strategy: OnWorkflowDeletion diff --git a/test/e2e/testdata/artifactgc/artgc-dag-wf-stopped-pod-gc-on-pod-completion.yaml b/test/e2e/testdata/artifactgc/artgc-dag-wf-stopped-pod-gc-on-pod-completion.yaml index e5e6430c8e1e..a1cf99967bb1 100644 --- a/test/e2e/testdata/artifactgc/artgc-dag-wf-stopped-pod-gc-on-pod-completion.yaml +++ b/test/e2e/testdata/artifactgc/artgc-dag-wf-stopped-pod-gc-on-pod-completion.yaml @@ -60,8 +60,9 @@ spec: done - name: artgc-dag-workflow-stopper container: - image: 
argoproj/argocli:latest - args: + image: quay.io/argoproj/argocli:latest + imagePullPolicy: Never + args: - stop - -l - workflows.argoproj.io/workflow=artgc-dag-wf-stopped-pod-gc-on-pod-completion @@ -132,4 +133,4 @@ spec: name: my-minio-cred key: secretkey artifactGC: - strategy: OnWorkflowDeletion \ No newline at end of file + strategy: OnWorkflowDeletion diff --git a/test/e2e/testdata/artifactgc/artgc-dag-wf-stopped.yaml b/test/e2e/testdata/artifactgc/artgc-dag-wf-stopped.yaml index 5480f3fce0a7..12251ae51238 100644 --- a/test/e2e/testdata/artifactgc/artgc-dag-wf-stopped.yaml +++ b/test/e2e/testdata/artifactgc/artgc-dag-wf-stopped.yaml @@ -62,8 +62,9 @@ spec: done - name: artgc-dag-workflow-stopper container: - image: argoproj/argocli:latest - args: + image: quay.io/argoproj/argocli:latest + imagePullPolicy: Never + args: - stop - -l - workflows.argoproj.io/workflow=artgc-dag-wf-stopped @@ -134,4 +135,4 @@ spec: name: my-minio-cred key: secretkey artifactGC: - strategy: OnWorkflowDeletion \ No newline at end of file + strategy: OnWorkflowDeletion diff --git a/test/e2e/testdata/resubmit-dag-with-dependencies.yaml b/test/e2e/testdata/resubmit-dag-with-dependencies.yaml new file mode 100644 index 000000000000..18d5afa79266 --- /dev/null +++ b/test/e2e/testdata/resubmit-dag-with-dependencies.yaml @@ -0,0 +1,35 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: resubmit-dag-with-dependencies- + labels: + workflow: resubmit-dag-with-dependencies +spec: + entrypoint: rand-fail-dag + templates: + - name: rand-fail-dag + dag: + tasks: + - name: A + template: success + - name: B + template: fail + depends: A + - name: C + depends: "B" + template: success + - name: D + depends: "C" + template: success + - name: fail + container: + image: busybox + command: ["sh", -c] + args: + - exit 1 + - name: success + container: + image: busybox + command: ["sh", -c] + args: + - exit 0 \ No newline at end of file diff --git 
a/test/e2e/testdata/retry-on-stopped.yaml b/test/e2e/testdata/retry-on-stopped.yaml index 88960dedb828..6b7276f570cb 100644 --- a/test/e2e/testdata/retry-on-stopped.yaml +++ b/test/e2e/testdata/retry-on-stopped.yaml @@ -29,6 +29,7 @@ spec: - name: stop container: image: quay.io/argoproj/argocli:latest + imagePullPolicy: Never args: - stop - -l diff --git a/test/e2e/testdata/retry-workflow-with-continueon.yaml b/test/e2e/testdata/retry-workflow-with-continueon.yaml index 9d2ce1414860..43a3b5670078 100644 --- a/test/e2e/testdata/retry-workflow-with-continueon.yaml +++ b/test/e2e/testdata/retry-workflow-with-continueon.yaml @@ -6,6 +6,9 @@ spec: entrypoint: dag templates: - name: dag + retryStrategy: + limit: 2 + retryPolicy: OnError dag: failFast: false tasks: diff --git a/test/e2e/testdata/workflow-template-with-containerset.yaml b/test/e2e/testdata/workflow-template-with-containerset.yaml new file mode 100644 index 000000000000..b2f4c32a880a --- /dev/null +++ b/test/e2e/testdata/workflow-template-with-containerset.yaml @@ -0,0 +1,32 @@ +apiVersion: argoproj.io/v1alpha1 +kind: WorkflowTemplate +metadata: + name: containerset-with-retrystrategy + annotations: + workflows.argoproj.io/description: | + This workflow creates a container set with a retryStrategy. 
+spec: + entrypoint: test + templates: + - name: test + containerSet: + retryStrategy: + retries: "2" + containers: + - name: c1 + image: python:alpine3.6 + command: + - python + - -c + args: + - | + print("hi") + - name: c2 + image: python:alpine3.6 + command: + - invalid + - command + - name: c3 + image: alpine:latest + command: [ sh, -c ] + args: [ "echo intentional failure; exit 1" ] \ No newline at end of file diff --git a/ui/src/app/workflows/components/resubmit-workflow-panel.tsx b/ui/src/app/workflows/components/resubmit-workflow-panel.tsx index a23476de06b1..9b2ed35769b7 100644 --- a/ui/src/app/workflows/components/resubmit-workflow-panel.tsx +++ b/ui/src/app/workflows/components/resubmit-workflow-panel.tsx @@ -1,9 +1,10 @@ import {Checkbox} from 'argo-ui'; -import React, {useState} from 'react'; +import React, {useContext, useState} from 'react'; import {Parameter, ResubmitOpts, Workflow} from '../../../models'; import {uiUrl} from '../../shared/base'; import {ErrorNotice} from '../../shared/components/error-notice'; import {ParametersInput} from '../../shared/components/parameters-input/parameters-input'; +import {Context} from '../../shared/context'; import {services} from '../../shared/services'; import {Utils} from '../../shared/utils'; @@ -13,6 +14,7 @@ interface Props { } export function ResubmitWorkflowPanel(props: Props) { + const {navigation} = useContext(Context); const [overrideParameters, setOverrideParameters] = useState(false); const [workflowParameters, setWorkflowParameters] = useState(JSON.parse(JSON.stringify(props.workflow.spec.arguments.parameters || []))); const [memoized, setMemoized] = useState(false); @@ -33,7 +35,7 @@ export function ResubmitWorkflowPanel(props: Props) { const submitted = props.isArchived ? 
await services.workflows.resubmitArchived(props.workflow.metadata.uid, props.workflow.metadata.namespace, opts) : await services.workflows.resubmit(props.workflow.metadata.name, props.workflow.metadata.namespace, opts); - document.location.href = uiUrl(`workflows/${submitted.metadata.namespace}/${submitted.metadata.name}`); + navigation.goto(uiUrl(`workflows/${submitted.metadata.namespace}/${submitted.metadata.name}`)); } catch (err) { setError(err); setIsSubmitting(false); diff --git a/ui/src/app/workflows/components/retry-workflow-panel.tsx b/ui/src/app/workflows/components/retry-workflow-panel.tsx index b274d9ebc0ac..4aa5448da661 100644 --- a/ui/src/app/workflows/components/retry-workflow-panel.tsx +++ b/ui/src/app/workflows/components/retry-workflow-panel.tsx @@ -1,9 +1,10 @@ import {Checkbox} from 'argo-ui'; -import React, {useState} from 'react'; +import React, {useContext, useState} from 'react'; import {Parameter, RetryOpts, Workflow} from '../../../models'; import {uiUrl} from '../../shared/base'; import {ErrorNotice} from '../../shared/components/error-notice'; import {ParametersInput} from '../../shared/components/parameters-input/parameters-input'; +import {Context} from '../../shared/context'; import {services} from '../../shared/services'; import {Utils} from '../../shared/utils'; @@ -14,6 +15,7 @@ interface Props { } export function RetryWorkflowPanel(props: Props) { + const {navigation} = useContext(Context); const [overrideParameters, setOverrideParameters] = useState(false); const [restartSuccessful, setRestartSuccessful] = useState(false); const [workflowParameters, setWorkflowParameters] = useState(JSON.parse(JSON.stringify(props.workflow.spec.arguments.parameters || []))); @@ -37,7 +39,7 @@ export function RetryWorkflowPanel(props: Props) { props.isArchived && !props.isWorkflowInCluster ? 
await services.workflows.retryArchived(props.workflow.metadata.uid, props.workflow.metadata.namespace, opts) : await services.workflows.retry(props.workflow.metadata.name, props.workflow.metadata.namespace, opts); - document.location.href = uiUrl(`workflows/${submitted.metadata.namespace}/${submitted.metadata.name}`); + navigation.goto(uiUrl(`workflows/${submitted.metadata.namespace}/${submitted.metadata.name}#`)); // add # at the end to reset query params to close panel } catch (err) { setError(err); setIsSubmitting(false); diff --git a/ui/src/app/workflows/components/submit-workflow-panel.tsx b/ui/src/app/workflows/components/submit-workflow-panel.tsx index 90fd2bd31df8..89d9500e1db7 100644 --- a/ui/src/app/workflows/components/submit-workflow-panel.tsx +++ b/ui/src/app/workflows/components/submit-workflow-panel.tsx @@ -1,10 +1,11 @@ import {Select} from 'argo-ui'; -import React, {useMemo, useState} from 'react'; +import React, {useContext, useMemo, useState} from 'react'; import {Parameter, Template} from '../../../models'; import {uiUrl} from '../../shared/base'; import {ErrorNotice} from '../../shared/components/error-notice'; import {ParametersInput} from '../../shared/components/parameters-input/parameters-input'; import {TagsInput} from '../../shared/components/tags-input/tags-input'; +import {Context} from '../../shared/context'; import {services} from '../../shared/services'; import {Utils} from '../../shared/utils'; @@ -26,6 +27,7 @@ const defaultTemplate: Template = { }; export function SubmitWorkflowPanel(props: Props) { + const {navigation} = useContext(Context); const [entrypoint, setEntrypoint] = useState(workflowEntrypoint); const [parameters, setParameters] = useState([]); const [workflowParameters, setWorkflowParameters] = useState(JSON.parse(JSON.stringify(props.workflowParameters))); @@ -55,7 +57,7 @@ export function SubmitWorkflowPanel(props: Props) { ], labels: labels.join(',') }); - document.location.href = 
uiUrl(`workflows/${submitted.metadata.namespace}/${submitted.metadata.name}`); + navigation.goto(uiUrl(`workflows/${submitted.metadata.namespace}/${submitted.metadata.name}`)); } catch (err) { setError(err); setIsSubmitting(false); diff --git a/ui/src/app/workflows/components/workflow-details/workflow-details.tsx b/ui/src/app/workflows/components/workflow-details/workflow-details.tsx index 7e094c5dd6c1..1864749cca65 100644 --- a/ui/src/app/workflows/components/workflow-details/workflow-details.tsx +++ b/ui/src/app/workflows/components/workflow-details/workflow-details.tsx @@ -93,9 +93,9 @@ export function WorkflowDetails({history, location, match}: RouteComponentProps< // boiler-plate const {navigation, popup} = useContext(Context); const queryParams = new URLSearchParams(location.search); + const namespace = match.params.namespace; + const name = match.params.name; - const [namespace] = useState(match.params.namespace); - const [name, setName] = useState(match.params.name); const [tab, setTab] = useState(queryParams.get('tab') || 'workflow'); const [uid, setUid] = useState(queryParams.get('uid') || ''); const [nodeId, setNodeId] = useState(queryParams.get('nodeId')); @@ -106,8 +106,8 @@ export function WorkflowDetails({history, location, match}: RouteComponentProps< const [workflow, setWorkflow] = useState(); const [links, setLinks] = useState(); const [error, setError] = useState(); - const selectedNode = workflow && workflow.status && workflow.status.nodes && workflow.status.nodes[nodeId]; - const selectedArtifact = workflow && workflow.status && findArtifact(workflow.status, nodeId); + const selectedNode = workflow?.status?.nodes?.[nodeId]; + const selectedArtifact = workflow?.status && findArtifact(workflow.status, nodeId); const [selectedTemplateArtifactRepo, setSelectedTemplateArtifactRepo] = useState(); const isSidePanelExpanded = !!(selectedNode || selectedArtifact); const isSidePanelAnimating = useTransition(isSidePanelExpanded, ANIMATION_MS + 
ANIMATION_BUFFER_MS); @@ -213,9 +213,6 @@ export function WorkflowDetails({history, location, match}: RouteComponentProps< } navigation.goto(uiUrl(`workflows/${workflow.metadata.namespace}`)); - // TODO: This is a temporary workaround so that the list of workflows - // is correctly displayed. Workflow list page needs to be more responsive. - window.location.reload(); }); } else if (workflowOperation.title === 'RESUBMIT') { setSidePanel('resubmit'); @@ -227,12 +224,7 @@ export function WorkflowDetails({history, location, match}: RouteComponentProps< return; } - workflowOperation - .action(workflow) - .then((wf: Workflow) => { - setName(wf.metadata.name); - }) - .catch(setError); + workflowOperation.action(workflow).catch(setError); }); } } @@ -374,7 +366,7 @@ export function WorkflowDetails({history, location, match}: RouteComponentProps< if (e.type === 'DELETED') { setUid(e.object.metadata.uid); setError(new Error('Workflow gone')); - if (e.object.metadata.labels[archivalStatus]) { + if (e.object.metadata.labels?.[archivalStatus]) { e.object.metadata.labels[archivalStatus] = 'Persisted'; } setWorkflow(e.object); @@ -396,33 +388,30 @@ export function WorkflowDetails({history, location, match}: RouteComponentProps< // Get workflow useEffect(() => { (async () => { - let archivedWf: Workflow; - if (uid !== '') { - try { - archivedWf = await services.workflows.getArchived(namespace, uid); - setError(null); - } catch (err) { - if (err.status !== 404) { - setError(err); - } - } - } - try { const wf = await services.workflows.get(namespace, name); + setUid(wf.metadata.uid); + setWorkflow(wf); setError(null); - // If we find live workflow which has same uid, we use live workflow. 
- if (!archivedWf || archivedWf.metadata.uid === wf.metadata.uid) { - setWorkflow(wf); - setUid(wf.metadata.uid); - } else { - setWorkflow(archivedWf); - } + return; } catch (err) { - if (archivedWf) { - setWorkflow(archivedWf); - } else { + if (err.status !== 404 && uid === '') { setError(err); + return; + } + + try { + const archivedWf = await services.workflows.getArchived(namespace, uid); + setWorkflow(archivedWf); + setError(null); + return; + } catch (archiveErr) { + if (archiveErr.status === 500 && archiveErr.response.body.message === 'getting archived workflows not supported') { + setError(err); + return; + } + + setError(archiveErr); } } })(); @@ -587,7 +576,7 @@ export function WorkflowDetails({history, location, match}: RouteComponentProps< diff --git a/ui/src/app/workflows/components/workflow-logs-viewer/workflow-logs-viewer.tsx b/ui/src/app/workflows/components/workflow-logs-viewer/workflow-logs-viewer.tsx index 47446937b004..1a58d2fb412b 100644 --- a/ui/src/app/workflows/components/workflow-logs-viewer/workflow-logs-viewer.tsx +++ b/ui/src/app/workflows/components/workflow-logs-viewer/workflow-logs-viewer.tsx @@ -25,7 +25,7 @@ const timezones = Intl.supportedValuesOf('timeZone'); interface WorkflowLogsViewerProps { workflow: models.Workflow; - nodeId?: string; + initialNodeId?: string; initialPodName: string; container: string; archived: boolean; @@ -74,7 +74,7 @@ function parseAndTransform(formattedString: string, timeZone: string) { } } -export function WorkflowLogsViewer({workflow, nodeId, initialPodName, container, archived}: WorkflowLogsViewerProps) { +export function WorkflowLogsViewer({workflow, initialNodeId, initialPodName, container, archived}: WorkflowLogsViewerProps) { const storage = new ScopedLocalStorage('workflow-logs-viewer'); const storedJsonFields = storage.getItem('jsonFields', { values: [] @@ -162,7 +162,6 @@ export function WorkflowLogsViewer({workflow, nodeId, initialPodName, container, // map pod names to corresponding node IDs 
const podNamesToNodeIDs = new Map(); - const podNames = [{value: '', label: 'All'}].concat( Object.values(workflow.status.nodes || {}) .filter(x => x.type === 'Pod') @@ -175,8 +174,10 @@ export function WorkflowLogsViewer({workflow, nodeId, initialPodName, container, }) ); + // default to the node id of the pod + const nodeId = initialNodeId || podNamesToNodeIDs.get(podName); const node = workflow.status.nodes[nodeId]; - const templates = execSpec(workflow).templates.filter(t => !node || t.name === node.templateName); + const templates = execSpec(workflow).templates.filter(t => !node || t.name === getTemplateNameFromNode(node)); const containers = [ ...new Set( diff --git a/ui/src/models/workflows.ts b/ui/src/models/workflows.ts index 8acf9178bb99..bdb03f0180bd 100644 --- a/ui/src/models/workflows.ts +++ b/ui/src/models/workflows.ts @@ -545,7 +545,9 @@ export function isWorkflowInCluster(wf: Workflow): boolean { if (!wf) { return false; } - return !wf.metadata.labels[archivalStatus] || wf.metadata.labels[archivalStatus] === 'Pending' || wf.metadata.labels[archivalStatus] === 'Archived'; + + const labelValue = wf.metadata?.labels?.[archivalStatus]; + return !labelValue || labelValue === 'Pending' || labelValue === 'Archived'; } export function isArchivedWorkflow(wf?: Workflow): boolean { diff --git a/util/errors/errors.go b/util/errors/errors.go index 496176070128..2982be216fd7 100644 --- a/util/errors/errors.go +++ b/util/errors/errors.go @@ -37,7 +37,8 @@ func IsTransientErr(err error) bool { apierr.IsServiceUnavailable(err) || isTransientEtcdErr(err) || matchTransientErrPattern(err) || - errors.Is(err, NewErrTransient("")) + errors.Is(err, NewErrTransient("")) || + isTransientSqbErr(err) if isTransient { log.Infof("Transient error: %v", err) } else { @@ -123,3 +124,7 @@ func generateErrorString(err error) string { } return errorString } + +func isTransientSqbErr(err error) bool { + return strings.Contains(err.Error(), "upper: no more rows in") +} diff --git 
a/util/printer/workflow-printer.go b/util/printer/workflow-printer.go index ea6ca705637c..ebd718029c1e 100644 --- a/util/printer/workflow-printer.go +++ b/util/printer/workflow-printer.go @@ -137,7 +137,7 @@ func countPendingRunningCompletedNodes(wf *wfv1.Workflow) (int, int, int) { running := 0 completed := 0 for _, node := range wf.Status.Nodes { - tmpl := wf.GetTemplateByName(node.TemplateName) + tmpl := wf.GetTemplateByName(util.GetTemplateFromNode(node)) if tmpl == nil || !tmpl.IsPodType() { continue } diff --git a/workflow/artifacts/oss/oss.go b/workflow/artifacts/oss/oss.go index d414a4c02a73..69f96e250b18 100644 --- a/workflow/artifacts/oss/oss.go +++ b/workflow/artifacts/oss/oss.go @@ -1,6 +1,7 @@ package oss import ( + "crypto/sha256" "fmt" "io" "math" @@ -16,7 +17,6 @@ import ( "github.com/aliyun/credentials-go/credentials" log "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/utils/pointer" "github.com/argoproj/argo-workflows/v3/errors" wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" @@ -202,7 +202,9 @@ func (ossDriver *ArtifactDriver) Save(path string, outputArtifact *wfv1.Artifact objectName := outputArtifact.OSS.Key if outputArtifact.OSS.LifecycleRule != nil { err = setBucketLifecycleRule(osscli, outputArtifact.OSS) - return !isTransientOSSErr(err), err + if err != nil { + return !isTransientOSSErr(err), err + } } if isDir { if err = putDirectory(bucket, objectName, path); err != nil { @@ -292,33 +294,33 @@ func setBucketLifecycleRule(client *oss.Client, ossArtifact *wfv1.OSSArtifact) e return fmt.Errorf("markInfrequentAccessAfterDays cannot be large than markDeletionAfterDays") } - // Set expiration rule. - expirationRule := oss.BuildLifecycleRuleByDays("expiration-rule", ossArtifact.Key, true, markInfrequentAccessAfterDays) - // Automatically delete the expired delete tag so we don't have to manage it ourselves. + // Delete the current version objects after a period of time. 
+ // If BucketVersioning is enabled, the objects will turn to non-current version. expiration := oss.LifecycleExpiration{ - ExpiredObjectDeleteMarker: pointer.BoolPtr(true), + Days: markDeletionAfterDays, } // Convert to Infrequent Access (IA) storage type for objects that are expired after a period of time. - versionTransition := oss.LifecycleVersionTransition{ - NoncurrentDays: markInfrequentAccessAfterDays, - StorageClass: oss.StorageIA, + transition := oss.LifecycleTransition{ + Days: markInfrequentAccessAfterDays, + StorageClass: oss.StorageIA, } - // Mark deletion after a period of time. - versionExpiration := oss.LifecycleVersionExpiration{ - NoncurrentDays: markDeletionAfterDays, + // Delete the aborted uploaded parts after a period of time. + abortMultipartUpload := oss.LifecycleAbortMultipartUpload{ + Days: markDeletionAfterDays, } - versionTransitionRule := oss.LifecycleRule{ - ID: "version-transition-rule", - Prefix: ossArtifact.Key, - Status: string(oss.VersionEnabled), - Expiration: &expiration, - NonVersionExpiration: &versionExpiration, - NonVersionTransitions: []oss.LifecycleVersionTransition{versionTransition}, + + keySha := fmt.Sprintf("%x", sha256.Sum256([]byte(ossArtifact.Key))) + rule := oss.LifecycleRule{ + ID: keySha, + Prefix: ossArtifact.Key, + Status: string(oss.VersionEnabled), + Expiration: &expiration, + Transitions: []oss.LifecycleTransition{transition}, + AbortMultipartUpload: &abortMultipartUpload, } // Set lifecycle rules to the bucket. 
- rules := []oss.LifecycleRule{expirationRule, versionTransitionRule} - err := client.SetBucketLifecycle(ossArtifact.Bucket, rules) + err := client.SetBucketLifecycle(ossArtifact.Bucket, []oss.LifecycleRule{rule}) return err } diff --git a/workflow/controller/artifact_gc.go b/workflow/controller/artifact_gc.go index a6fcbca52e79..7bb8ed627d05 100644 --- a/workflow/controller/artifact_gc.go +++ b/workflow/controller/artifact_gc.go @@ -179,10 +179,7 @@ func (woc *wfOperationCtx) processArtifactGCStrategy(ctx context.Context, strate woc.log.Errorf("Was unable to obtain node for %s", artifactSearchResult.NodeID) return fmt.Errorf("can't process Artifact GC Strategy %s: node ID %q not found in Status??", strategy, artifactSearchResult.NodeID) } - templateName := node.TemplateName - if templateName == "" && node.GetTemplateRef() != nil { - templateName = node.GetTemplateRef().Template - } + templateName := util.GetTemplateFromNode(*node) if templateName == "" { return fmt.Errorf("can't process Artifact GC Strategy %s: node %+v has an unnamed template", strategy, node) } diff --git a/workflow/controller/controller.go b/workflow/controller/controller.go index 60aa9f4b1748..36fbc037eeb6 100644 --- a/workflow/controller/controller.go +++ b/workflow/controller/controller.go @@ -1021,7 +1021,7 @@ func (wfc *WorkflowController) addWorkflowInformerHandlers(ctx context.Context) DeleteFunc: func(obj interface{}) { wf, ok := obj.(*unstructured.Unstructured) if ok { // maybe cache.DeletedFinalStateUnknown - wfc.metrics.StopRealtimeMetricsForKey(string(wf.GetUID())) + wfc.metrics.DeleteRealtimeMetricsForKey(string(wf.GetUID())) } }, }) diff --git a/workflow/controller/estimation/estimator_factory.go b/workflow/controller/estimation/estimator_factory.go index 60311eb8f5c3..984e76d98cdc 100644 --- a/workflow/controller/estimation/estimator_factory.go +++ b/workflow/controller/estimation/estimator_factory.go @@ -2,7 +2,6 @@ package estimation import ( "fmt" - "time" 
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/labels" @@ -10,6 +9,7 @@ import ( "github.com/argoproj/argo-workflows/v3/persist/sqldb" wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" + "github.com/argoproj/argo-workflows/v3/server/utils" "github.com/argoproj/argo-workflows/v3/workflow/common" "github.com/argoproj/argo-workflows/v3/workflow/controller/indexes" "github.com/argoproj/argo-workflows/v3/workflow/hydrator" @@ -76,7 +76,13 @@ func (f *estimatorFactory) NewEstimator(wf *wfv1.Workflow) (Estimator, error) { if err != nil { return defaultEstimator, fmt.Errorf("failed to parse selector to requirements: %v", err) } - workflows, err := f.wfArchive.ListWorkflows(wf.Namespace, "", "", time.Time{}, time.Time{}, requirements, 1, 0) + workflows, err := f.wfArchive.ListWorkflows( + utils.ListOptions{ + Namespace: wf.Namespace, + LabelRequirements: requirements, + Limit: 1, + Offset: 0, + }) if err != nil { return defaultEstimator, fmt.Errorf("failed to list archived workflows: %v", err) } diff --git a/workflow/controller/estimation/estimator_factory_test.go b/workflow/controller/estimation/estimator_factory_test.go index aeef2a7128c3..c4bb0ab06981 100644 --- a/workflow/controller/estimation/estimator_factory_test.go +++ b/workflow/controller/estimation/estimator_factory_test.go @@ -2,7 +2,6 @@ package estimation import ( "testing" - "time" "github.com/stretchr/testify/assert" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -10,6 +9,7 @@ import ( sqldbmocks "github.com/argoproj/argo-workflows/v3/persist/sqldb/mocks" wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" + "github.com/argoproj/argo-workflows/v3/server/utils" testutil "github.com/argoproj/argo-workflows/v3/test/util" "github.com/argoproj/argo-workflows/v3/workflow/common" "github.com/argoproj/argo-workflows/v3/workflow/controller/indexes" @@ -53,7 +53,11 @@ metadata: wfArchive := &sqldbmocks.WorkflowArchive{} r, err := 
labels.ParseToRequirements("workflows.argoproj.io/phase=Succeeded,workflows.argoproj.io/workflow-template=my-archived-wftmpl") assert.NoError(t, err) - wfArchive.On("ListWorkflows", "my-ns", "", "", time.Time{}, time.Time{}, labels.Requirements(r), 1, 0).Return(wfv1.Workflows{ + wfArchive.On("ListWorkflows", utils.ListOptions{ + Namespace: "my-ns", + LabelRequirements: r, + Limit: 1, + }).Return(wfv1.Workflows{ *testutil.MustUnmarshalWorkflow(` metadata: name: my-archived-wftmpl-baseline`), diff --git a/workflow/controller/exec_control.go b/workflow/controller/exec_control.go index d6ddf9fdce4c..a58ac0376776 100644 --- a/workflow/controller/exec_control.go +++ b/workflow/controller/exec_control.go @@ -114,7 +114,7 @@ func (woc *wfOperationCtx) killDaemonedChildren(nodeID string) { if !childNode.IsDaemoned() { continue } - podName := util.GeneratePodName(woc.wf.Name, childNode.Name, childNode.TemplateName, childNode.ID, util.GetWorkflowPodNameVersion(woc.wf)) + podName := util.GeneratePodName(woc.wf.Name, childNode.Name, util.GetTemplateFromNode(childNode), childNode.ID, util.GetWorkflowPodNameVersion(woc.wf)) woc.controller.queuePodForCleanup(woc.wf.Namespace, podName, terminateContainers) childNode.Phase = wfv1.NodeSucceeded childNode.Daemoned = nil diff --git a/workflow/controller/operator.go b/workflow/controller/operator.go index 4042f98e608f..6e3bc5adfbac 100644 --- a/workflow/controller/operator.go +++ b/workflow/controller/operator.go @@ -405,9 +405,9 @@ func (woc *wfOperationCtx) operate(ctx context.Context) { failedNodeStatus{ DisplayName: node.DisplayName, Message: node.Message, - TemplateName: node.TemplateName, + TemplateName: wfutil.GetTemplateFromNode(node), Phase: string(node.Phase), - PodName: wfutil.GeneratePodName(woc.wf.Name, node.Name, node.TemplateName, node.ID, wfutil.GetPodNameVersion()), + PodName: wfutil.GeneratePodName(woc.wf.Name, node.Name, wfutil.GetTemplateFromNode(node), node.ID, wfutil.GetPodNameVersion()), FinishedAt: 
node.FinishedAt, }) } @@ -800,6 +800,7 @@ func (woc *wfOperationCtx) persistUpdates(ctx context.Context) { // Make sure the workflow completed. if woc.wf.Status.Fulfilled() { + woc.controller.metrics.StopRealtimeMetricsForKey(string(woc.wf.GetUID())) if err := woc.deleteTaskResults(ctx); err != nil { woc.log.WithError(err).Warn("failed to delete task-results") } @@ -1334,7 +1335,7 @@ func (woc *wfOperationCtx) assessNodeStatus(pod *apiv1.Pod, old *wfv1.NodeStatus new.Phase = wfv1.NodeSucceeded } else { new.Phase, new.Message = woc.inferFailedReason(pod, tmpl) - woc.log.WithField("displayName", old.DisplayName).WithField("templateName", old.TemplateName). + woc.log.WithField("displayName", old.DisplayName).WithField("templateName", wfutil.GetTemplateFromNode(*old)). WithField("pod", pod.Name).Infof("Pod failed: %s", new.Message) } new.Daemoned = nil @@ -2624,7 +2625,7 @@ func (woc *wfOperationCtx) getPodByNode(node *wfv1.NodeStatus) (*apiv1.Pod, erro return nil, fmt.Errorf("Expected node type %s, got %s", wfv1.NodeTypePod, node.Type) } - podName := woc.getPodName(node.Name, node.TemplateName) + podName := woc.getPodName(node.Name, wfutil.GetTemplateFromNode(*node)) return woc.controller.getPod(woc.wf.GetNamespace(), podName) } diff --git a/workflow/controller/operator_test.go b/workflow/controller/operator_test.go index 76bcaca1ff5c..d16607b8f54b 100644 --- a/workflow/controller/operator_test.go +++ b/workflow/controller/operator_test.go @@ -7380,6 +7380,107 @@ func TestRetryOnDiffHost(t *testing.T) { assert.Equal(t, sourceNodeSelectorRequirement, targetNodeSelectorRequirement) } +var nodeAntiAffinityWorkflow = ` +apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + name: retry-fail +spec: + entrypoint: retry-fail + templates: + - name: retry-fail + retryStrategy: + limit: 2 + retryPolicy: "Always" + affinity: + nodeAntiAffinity: {} + script: + image: python:alpine3.6 + command: [python] + source: | + exit(1) +` + +func TestRetryOnNodeAntiAffinity(t 
*testing.T) { + wf := wfv1.MustUnmarshalWorkflow(nodeAntiAffinityWorkflow) + cancel, controller := newController(wf) + defer cancel() + + ctx := context.Background() + woc := newWorkflowOperationCtx(wf, controller) + woc.operate(ctx) + + pods, err := listPods(woc) + assert.NoError(t, err) + assert.Equal(t, 1, len(pods.Items)) + + // First retry + pod := pods.Items[0] + pod.Spec.NodeName = "node0" + _, err = controller.kubeclientset.CoreV1().Pods(woc.wf.GetNamespace()).Update(ctx, &pod, metav1.UpdateOptions{}) + assert.NoError(t, err) + makePodsPhase(ctx, woc, apiv1.PodFailed) + woc.operate(ctx) + + node := woc.wf.Status.Nodes.FindByDisplayName("retry-fail(0)") + if assert.NotNil(t, node) { + assert.Equal(t, wfv1.NodeFailed, node.Phase) + assert.Equal(t, "node0", node.HostNodeName) + } + + pods, err = listPods(woc) + assert.NoError(t, err) + assert.Equal(t, 2, len(pods.Items)) + + var podRetry1 apiv1.Pod + for _, p := range pods.Items { + if p.Name != pod.GetName() { + podRetry1 = p + } + } + + hostSelector := "kubernetes.io/hostname" + targetNodeSelectorRequirement := podRetry1.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[0] + sourceNodeSelectorRequirement := apiv1.NodeSelectorRequirement{ + Key: hostSelector, + Operator: apiv1.NodeSelectorOpNotIn, + Values: []string{node.HostNodeName}, + } + assert.Equal(t, sourceNodeSelectorRequirement, targetNodeSelectorRequirement) + + // Second retry + podRetry1.Spec.NodeName = "node1" + _, err = controller.kubeclientset.CoreV1().Pods(woc.wf.GetNamespace()).Update(ctx, &podRetry1, metav1.UpdateOptions{}) + assert.NoError(t, err) + makePodsPhase(ctx, woc, apiv1.PodFailed) + woc.operate(ctx) + + node1 := woc.wf.Status.Nodes.FindByDisplayName("retry-fail(1)") + if assert.NotNil(t, node) { + assert.Equal(t, wfv1.NodeFailed, node1.Phase) + assert.Equal(t, "node1", node1.HostNodeName) + } + + pods, err = listPods(woc) + assert.NoError(t, err) + assert.Equal(t, 3, 
len(pods.Items)) + + var podRetry2 apiv1.Pod + for _, p := range pods.Items { + if p.Name != pod.GetName() && p.Name != podRetry1.GetName() { + podRetry2 = p + } + } + + targetNodeSelectorRequirement = podRetry2.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[0] + sourceNodeSelectorRequirement = apiv1.NodeSelectorRequirement{ + Key: hostSelector, + Operator: apiv1.NodeSelectorOpNotIn, + Values: []string{node1.HostNodeName, node.HostNodeName}, + } + assert.Equal(t, sourceNodeSelectorRequirement, targetNodeSelectorRequirement) +} + var noPodsWhenShutdown = ` apiVersion: argoproj.io/v1alpha1 kind: Workflow diff --git a/workflow/controller/retry_tweak.go b/workflow/controller/retry_tweak.go index a2d1d3baceed..5b7bc815dd03 100644 --- a/workflow/controller/retry_tweak.go +++ b/workflow/controller/retry_tweak.go @@ -15,23 +15,18 @@ type RetryTweak = func(retryStrategy wfv1.RetryStrategy, nodes wfv1.Nodes, pod * func FindRetryNode(nodes wfv1.Nodes, nodeID string) *wfv1.NodeStatus { boundaryID := nodes[nodeID].BoundaryID boundaryNode := nodes[boundaryID] - if boundaryNode.TemplateName != "" { - templateName := boundaryNode.TemplateName - for _, node := range nodes { - if node.Type == wfv1.NodeTypeRetry && node.TemplateName == templateName { - return &node - } + for _, node := range nodes { + if node.Type != wfv1.NodeTypeRetry { + continue } - } - if boundaryNode.TemplateRef != nil { - templateRef := boundaryNode.TemplateRef - for _, node := range nodes { - if node.Type == wfv1.NodeTypeRetry && node.TemplateRef != nil && node.TemplateRef.Name == templateRef.Name && node.TemplateRef.Template == templateRef.Template { - return &node - } + if boundaryID == "" && node.HasChild(nodeID) { + return &node + } else if boundaryNode.TemplateName != "" && node.TemplateName == boundaryNode.TemplateName { + return &node + } else if boundaryNode.TemplateRef != nil && node.TemplateRef != nil && node.TemplateRef.Name == 
boundaryNode.TemplateRef.Name && node.TemplateRef.Template == boundaryNode.TemplateRef.Template { + return &node } } - return nil } diff --git a/workflow/metrics/metrics.go b/workflow/metrics/metrics.go index a61516b99a32..fd001c864e92 100644 --- a/workflow/metrics/metrics.go +++ b/workflow/metrics/metrics.go @@ -42,6 +42,8 @@ func (s ServerConfig) SameServerAs(other ServerConfig) bool { type metric struct { metric prometheus.Metric lastUpdated time.Time + realtime bool + completed bool } type Metrics struct { @@ -153,6 +155,23 @@ func (m *Metrics) StopRealtimeMetricsForKey(key string) { return } + realtimeMetrics := m.workflows[key] + for _, metric := range realtimeMetrics { + if realtimeMetric, ok := m.customMetrics[metric]; ok { + realtimeMetric.completed = true + m.customMetrics[metric] = realtimeMetric + } + } +} + +func (m *Metrics) DeleteRealtimeMetricsForKey(key string) { + m.mutex.Lock() + defer m.mutex.Unlock() + + if _, exists := m.workflows[key]; !exists { + return + } + realtimeMetrics := m.workflows[key] for _, metric := range realtimeMetrics { delete(m.customMetrics, metric) @@ -190,7 +209,7 @@ func (m *Metrics) UpsertCustomMetric(key string, ownerKey string, newMetric prom } else { m.metricNameHelps[name] = help } - m.customMetrics[key] = metric{metric: newMetric, lastUpdated: time.Now()} + m.customMetrics[key] = metric{metric: newMetric, lastUpdated: time.Now(), realtime: realtime} // If this is a realtime metric, track it if realtime { diff --git a/workflow/metrics/metrics_test.go b/workflow/metrics/metrics_test.go index 73de88442346..746e927ca5a0 100644 --- a/workflow/metrics/metrics_test.go +++ b/workflow/metrics/metrics_test.go @@ -131,6 +131,53 @@ func TestMetricGC(t *testing.T) { assert.Len(t, m.customMetrics, 0) } +func TestRealtimeMetricGC(t *testing.T) { + config := ServerConfig{ + Enabled: true, + Path: DefaultMetricsServerPath, + Port: DefaultMetricsServerPort, + TTL: 1 * time.Second, + } + m := New(config, config) + assert.Len(t, 
m.customMetrics, 0) + + err := m.UpsertCustomMetric("realtime_metric", "workflow-uid", newCounter("test", "test", nil), true) + if assert.NoError(t, err) { + assert.Len(t, m.customMetrics, 1) + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go m.garbageCollector(ctx) + + // simulate workflow is still running. + timeoutTime := time.Now().Add(time.Second * 2) + // Ensure we get at least one TTL run + for time.Now().Before(timeoutTime) { + // Break if we know our test will pass. + if len(m.customMetrics) == 0 { + break + } + // Sleep to prevent overloading test worker CPU. + time.Sleep(100 * time.Millisecond) + } + assert.Len(t, m.customMetrics, 1) + + // simulate workflow is completed. + m.StopRealtimeMetricsForKey("workflow-uid") + timeoutTime = time.Now().Add(time.Second * 2) + // Ensure we get at least one TTL run + for time.Now().Before(timeoutTime) { + // Break if we know our test will pass. + if len(m.customMetrics) == 0 { + break + } + // Sleep to prevent overloading test worker CPU. 
+ time.Sleep(100 * time.Millisecond) + } + assert.Len(t, m.customMetrics, 0) +} + func TestWorkflowQueueMetrics(t *testing.T) { config := ServerConfig{ Enabled: true, @@ -171,7 +218,7 @@ func TestRealTimeMetricDeletion(t *testing.T) { assert.NotEmpty(t, m.workflows["123"]) assert.Len(t, m.customMetrics, 1) - m.StopRealtimeMetricsForKey("123") + m.DeleteRealtimeMetricsForKey("123") assert.Empty(t, m.workflows["123"]) assert.Len(t, m.customMetrics, 0) @@ -183,7 +230,7 @@ func TestRealTimeMetricDeletion(t *testing.T) { assert.Empty(t, m.workflows["456"]) assert.Len(t, m.customMetrics, 1) - m.StopRealtimeMetricsForKey("456") + m.DeleteRealtimeMetricsForKey("456") assert.Empty(t, m.workflows["456"]) assert.Len(t, m.customMetrics, 1) } diff --git a/workflow/metrics/server.go b/workflow/metrics/server.go index cde6ac4bf3b8..b8bc731fbe77 100644 --- a/workflow/metrics/server.go +++ b/workflow/metrics/server.go @@ -139,7 +139,12 @@ func (m *Metrics) garbageCollector(ctx context.Context) { case <-ticker.C: for key, metric := range m.customMetrics { if time.Since(metric.lastUpdated) > m.metricsConfig.TTL { - delete(m.customMetrics, key) + switch { + case metric.realtime && metric.completed: + delete(m.customMetrics, key) + case !metric.realtime: + delete(m.customMetrics, key) + } } } } diff --git a/workflow/util/util.go b/workflow/util/util.go index 7e7fda53f9f6..a51c63e1f8d8 100644 --- a/workflow/util/util.go +++ b/workflow/util/util.go @@ -454,7 +454,7 @@ func ResumeWorkflow(ctx context.Context, wfIf v1alpha1.WorkflowInterface, hydrat func SelectorMatchesNode(selector fields.Selector, node wfv1.NodeStatus) bool { nodeFields := fields.Set{ "displayName": node.DisplayName, - "templateName": node.TemplateName, + "templateName": GetTemplateFromNode(node), "phase": string(node.Phase), "name": node.Name, "id": node.ID, @@ -733,6 +733,9 @@ func FormulateResubmitWorkflow(ctx context.Context, wf *wfv1.Workflow, memoized newNode.Phase = wfv1.NodeSkipped newNode.Type = 
wfv1.NodeTypeSkipped newNode.Message = fmt.Sprintf("original pod: %s", originalID) + } else if newNode.Type == wfv1.NodeTypeSkipped && !isDescendantNodeSucceeded(wf, node, make(map[string]bool)) { + newWF.Status.Nodes.Delete(newNode.ID) + continue } else { newNode.Phase = wfv1.NodePending newNode.Message = "" @@ -971,7 +974,7 @@ func FormulateRetryWorkflow(ctx context.Context, wf *wfv1.Workflow, restartSucce log.Debugf("Reset %s node %s since it's a group node", node.Name, string(node.Phase)) continue } else { - if isDescendantNodeSucceeded(wf, node, nodeIDsToReset) { + if node.Type != wfv1.NodeTypeRetry && isDescendantNodeSucceeded(wf, node, nodeIDsToReset) { log.Debugf("Node %s remains as is since it has succeed child nodes.", node.Name) newWF.Status.Nodes.Set(node.ID, node) continue