diff --git a/.github/workflows/deploy.yaml b/.github/workflows/deploy.yaml index 3d7643ad..ac8000e1 100644 --- a/.github/workflows/deploy.yaml +++ b/.github/workflows/deploy.yaml @@ -4,13 +4,25 @@ on: push: branches: - main + - 'release-*' + paths: + - 'stable/**' + +permissions: + contents: read jobs: release: + permissions: + contents: write # for helm/chart-releaser-action to push chart release and create a release + env: + REPO: "${{ github.repository }}" + REPO_NAME: "${{ github.event.repository.name }}" + TARGET_BRANCH: "${{ github.event.pull_request.base.ref }}" runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@ee0669bd1cc54295c223e0bb666b733df41de1c5 # v2.7.0 - name: Fetch history run: git fetch --prune --unshallow @@ -20,8 +32,25 @@ jobs: git config user.name "$GITHUB_ACTOR" git config user.email "$GITHUB_ACTOR@users.noreply.github.com" + - name: Add Anchore Helm repo + run: | + helm repo add anchore https://charts.anchore.io/stable + helm repo add bitnami https://charts.bitnami.com/bitnami + + - name: Run chart-releaser not latest + if: github.event.pull_request.base.ref != 'main' + uses: helm/chart-releaser-action@a917fd15b20e8b64b94d9158ad54cd6345335584 # v1.6.0 + with: + charts_dir: stable + charts_repo_url: https://charts.anchore.io + mark_as_latest: false + env: + CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}" + CR_MAKE_RELEASE_LATEST: "false" + - name: Run chart-releaser - uses: helm/chart-releaser-action@v1.0.0 + if: github.event.pull_request.base.ref == 'main' + uses: helm/chart-releaser-action@a917fd15b20e8b64b94d9158ad54cd6345335584 # v1.6.0 with: charts_dir: stable charts_repo_url: https://charts.anchore.io diff --git a/.github/workflows/helm-unittests.yaml b/.github/workflows/helm-unittests.yaml new file mode 100644 index 00000000..6e7ced18 --- /dev/null +++ b/.github/workflows/helm-unittests.yaml @@ -0,0 +1,57 @@ +# Copyright VMware, Inc. 
+# SPDX-License-Identifier: APACHE-2.0 + +name: 'Helm Unit Tests' + +on: + pull_request: + paths: + - 'stable/enterprise/Chart.yaml' + - 'stable/feeds/Chart.yaml' + +# Remove all permissions by default +permissions: {} +jobs: + helm-unittests: + permissions: + contents: read # for helm-unittest to read the chart + env: + REPO: "${{ github.repository }}" + REPO_NAME: "${{ github.event.repository.name }}" + TARGET_BRANCH: "${{ github.event.pull_request.base.ref }}" + HELM_UNITTEST_VERSION: 0.3.5 + runs-on: ubuntu-latest + steps: + - name: Checkout charts + uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 + with: + path: ${{ github.repository }} + ref: ${{github.event.pull_request.head.ref}} + repository: ${{github.event.pull_request.head.repo.full_name}} + token: ${{ secrets.GITHUB_TOKEN }} + + - name: Set up Helm + uses: azure/setup-helm@5119fcb9089d432beecbf79bb2c7915207344b78 # v3.5 + with: + version: v3.8.0 + + - name: Install helm-unittest + run: | + helm plugin install https://github.com/helm-unittest/helm-unittest.git --version ${HELM_UNITTEST_VERSION} + + - run: cd "${REPO}" && git fetch origin ${TARGET_BRANCH} + + - name: Execute helm-unittests + run: | + cd "${REPO}" + files_changed="$(git diff --name-only origin/${TARGET_BRANCH} | sort | uniq)" + # Adding || true to avoid "Process exited with code 1" errors + charts_dirs_changed="$(echo "$files_changed" | xargs dirname | grep -o "stable/[^/]*" | sort | uniq || true)" + for chart in ${charts_dirs_changed}; do + echo "running helm unittest . for ${chart}" + pushd "${chart}" + helm repo add anchore https://charts.anchore.io/stable + helm dep up + helm unittest . 
+ popd + done diff --git a/.github/workflows/lint.yaml b/.github/workflows/lint.yaml new file mode 100644 index 00000000..81b8fcef --- /dev/null +++ b/.github/workflows/lint.yaml @@ -0,0 +1,45 @@ +name: "Lint changed charts using chart-testing tool" + +on: + pull_request: + paths: + - 'stable/**' + +permissions: + contents: read + +jobs: + lint: + strategy: + fail-fast: false + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@ee0669bd1cc54295c223e0bb666b733df41de1c5 # v2.7.0 + + - name: Fetch history + run: git fetch --prune --unshallow + + - name: Shellcheck + uses: ludeeus/action-shellcheck@00cae500b08a931fb5698e11e79bfbd38e612a38 # 2.0.0 + + - name: Set up chart-testing + uses: helm/chart-testing-action@b43128a8b25298e1e7b043b78ea6613844e079b1 # v2.7.0 + + - name: Run chart-testing (list-changed) + id: list-changed + run: | + changed=$(ct list-changed --config 'ct-config.yaml' --target-branch ${{ github.event.pull_request.base.ref }}) + if [[ -n "$changed" ]]; then + echo "CHANGED=true" >> "$GITHUB_OUTPUT" + fi + + - name: Run chart-testing (lint) + id: lint + run: ct lint --config 'ct-config.yaml' + if: steps.list-changed.outputs.CHANGED == 'true' && github.event.pull_request.base.ref == 'main' + + - name: Run chart-testing but skip version check (lint) + id: lintskipversion + run: ct lint --config 'ct-config.yaml' --check-version-increment=false + if: steps.list-changed.outputs.CHANGED == 'true' && github.event.pull_request.base.ref != 'main' diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml new file mode 100644 index 00000000..64fcde50 --- /dev/null +++ b/.github/workflows/scorecards.yml @@ -0,0 +1,71 @@ +# This workflow uses actions that are not certified by GitHub. They are provided +# by a third-party and are governed by separate terms of service, privacy +# policy, and support documentation. + +name: Scorecard supply-chain security +on: + # For Branch-Protection check. 
Only the default branch is supported. See + # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection + branch_protection_rule: + # To guarantee Maintained check is occasionally updated. See + # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained + schedule: + - cron: '20 7 * * 2' + push: + branches: ["main"] + +# Declare default permissions as read only. +permissions: read-all + +jobs: + analysis: + name: Scorecard analysis + runs-on: ubuntu-latest + permissions: + # Needed to upload the results to code-scanning dashboard. + security-events: write + # Needed to publish results and get a badge (see publish_results below). + id-token: write + contents: read + actions: read + + steps: + - name: "Checkout code" + uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 + with: + persist-credentials: false + + - name: "Run analysis" + uses: ossf/scorecard-action@99c53751e09b9529366343771cc321ec74e9bd3d # v2.0.6 + with: + results_file: results.sarif + results_format: sarif + # (Optional) "write" PAT token. Uncomment the `repo_token` line below if: + # - you want to enable the Branch-Protection check on a *public* repository, or + # - you are installing Scorecards on a *private* repository + # To create the PAT, follow the steps in https://github.com/ossf/scorecard-action#authentication-with-pat. + # repo_token: ${{ secrets.SCORECARD_TOKEN }} + + # Public repositories: + # - Publish results to OpenSSF REST API for easy access by consumers + # - Allows the repository to include the Scorecard badge. + # - See https://github.com/ossf/scorecard-action#publishing-results. + # For private repositories: + # - `publish_results` will always be set to `false`, regardless + # of the value entered here. + publish_results: true + + # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF + # format to the repository Actions tab. 
+ - name: "Upload artifact" + uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3 + with: + name: SARIF file + path: results.sarif + retention-days: 5 + + # Upload the results to GitHub's code scanning dashboard. + - name: "Upload to code-scanning" + uses: github/codeql-action/upload-sarif@04daf014b50eaf774287bf3f0f1869d4b4c4b913 # v2.21.7 + with: + sarif_file: results.sarif diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index de7cfbcf..bc7e2fa6 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -1,45 +1,102 @@ name: "Test using kind and chart-testing tool" on: - - pull_request + pull_request: + paths: + - 'stable/**' + +permissions: + contents: read jobs: test: strategy: fail-fast: false matrix: - kubernetesVersion: ["v1.14.10", "v1.22.0"] + kubernetesVersion: ["v1.24.17", "v1.25.16", "v1.26.14", "v1.27.11", "v1.28.7", "v1.29.2"] runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@ee0669bd1cc54295c223e0bb666b733df41de1c5 # v2.7.0 - name: Fetch history run: git fetch --prune --unshallow - name: Shellcheck - uses: ludeeus/action-shellcheck@1.0.0 + uses: ludeeus/action-shellcheck@00cae500b08a931fb5698e11e79bfbd38e612a38 # 2.0.0 + + - uses: actions/setup-python@e9aba2c848f5ebd159c070c61ea2c4e2b122355e # v2.3.4 + with: + python-version: 3.7 + + - name: Set up Helm + uses: azure/setup-helm@5119fcb9089d432beecbf79bb2c7915207344b78 # v3.5 + with: + version: v3.8.0 + + - name: Set up chart-testing + uses: helm/chart-testing-action@b43128a8b25298e1e7b043b78ea6613844e079b1 # v2.7.0 + + - name: Run chart-testing (list-changed) + id: list-changed + run: | + changed=$(ct list-changed --config 'ct-config.yaml' --target-branch ${{ github.event.pull_request.base.ref }}) + if [[ -n "$changed" ]]; then + echo "CHANGED=true" >> "$GITHUB_OUTPUT" + fi - name: Run chart-testing (lint) id: lint - uses: helm/chart-testing-action@v1.1.0 - with: - command: lint - 
config: ct-config.yaml + run: ct lint --config 'ct-config.yaml' + if: steps.list-changed.outputs.CHANGED == 'true' && github.event.pull_request.base.ref == 'main' + + - name: Run chart-testing but skip version check (lint) + id: lintskipversion + run: ct lint --config 'ct-config.yaml' --check-version-increment=false + if: steps.list-changed.outputs.CHANGED == 'true' && github.event.pull_request.base.ref != 'main' - name: Install kind - uses: helm/kind-action@v1.2.0 + if: steps.list-changed.outputs.CHANGED == 'true' + uses: helm/kind-action@dda0770415bac9fc20092cacbc54aa298604d140 # v1.8.0 with: node_image: "kindest/node:${{ matrix.kubernetesVersion }}" config: kind-config.yaml wait: 600s - if: steps.lint.outputs.changed == 'true' - name: Check kind nodes + if: steps.list-changed.outputs.CHANGED == 'true' run: kubectl describe nodes - - name: Run chart-testing (install) - uses: helm/chart-testing-action@v1.1.0 - with: - command: install - config: ct-config.yaml + - name: Create pullcreds and license secrets + if: steps.list-changed.outputs.CHANGED == 'true' + run: | + kubectl create namespace anchore + echo "${ANCHORE_LICENSE}" | base64 --decode > /tmp/anchore-license + kubectl --namespace anchore create secret generic anchore-enterprise-license --from-file=license.yaml=/tmp/anchore-license + kubectl --namespace anchore create secret docker-registry anchore-enterprise-pullcreds --docker-server=docker.io --docker-username="${DOCKER_USER}" --docker-password="${DOCKER_PASS}" + env: + ANCHORE_LICENSE: ${{ secrets.B64_ANCHORE_LICENSE }} + DOCKER_USER: ${{ secrets.ANCHOREREADONLY_DH_USERNAME }} + DOCKER_PASS: ${{ secrets.ANCHOREREADONLY_DH_PAT }} + + - name: Check if anchore-engine endpoint is required for admission controller chart + id: engine_required + run: | + if [[ -n $(git diff --name-only ${{ github.event.pull_request.base.sha }} ${{ github.sha }} | grep 'anchore-admission-controller') ]]; then + echo "File in the 'stable/anchore-admission-controller' directory 
was changed. We need an engine deployment" + echo "::set-output name=changed::true" + else + echo "No files in 'stable/anchore-admission-controller' directory were changed. Skipping engine deployment" + echo "::set-output name=changed::false" + fi + shell: bash + + - name: Deploy Engine + if: steps.engine_required.outputs.changed == 'true' + run: | + helm install engine anchore/anchore-engine --namespace anchore --wait + kubectl --namespace anchore get pods + + - name: Run chart-testing + if: steps.list-changed.outputs.CHANGED == 'true' + run: ct install --config ct-config.yaml --helm-extra-args "--timeout 600s" diff --git a/.github/workflows/values-converter-docker.yaml b/.github/workflows/values-converter-docker.yaml new file mode 100644 index 00000000..c7290a72 --- /dev/null +++ b/.github/workflows/values-converter-docker.yaml @@ -0,0 +1,34 @@ +name: Build & push values converter docker image + +on: + push: + branches: + - main + paths: + - 'scripts/enterprise-value-converter/**' + +permissions: + contents: read + +jobs: + build: + runs-on: ubuntu-latest + steps: + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0 + + - name: Login to DockerHub + uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0 + with: + username: ${{ secrets.ANCHOREDEVWRITE_DH_USERNAME }} + password: ${{ secrets.ANCHOREDEVWRITE_DH_PAT }} + + - name: Build and push + id: docker_build + uses: docker/build-push-action@0565240e2d4ab88bba5387d719585280857ece09 # v5.0.0 + with: + context: "{{defaultContext}}:scripts/enterprise-value-converter" + push: true + tags: | + anchore/enterprise-helm-migrator:${{ github.sha }} + anchore/enterprise-helm-migrator:latest diff --git a/.github/workflows/values-converter-tests.yaml b/.github/workflows/values-converter-tests.yaml new file mode 100644 index 00000000..8fd03d6a --- /dev/null +++ b/.github/workflows/values-converter-tests.yaml @@ -0,0 +1,31 @@ +name: Run values 
converter unit tests + +on: + pull_request: + paths: + - 'scripts/enterprise-value-converter/**.py' + +permissions: + contents: read + +jobs: + tests: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@ee0669bd1cc54295c223e0bb666b733df41de1c5 # v2.7.0 + + - name: Set up Python + uses: actions/setup-python@61a6322f88396a6271a6ee3565807d608ecaddd1 # v4.7.0 + with: + python-version: 3.9 + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install argparse pyyaml + + - name: Run tests + run: | + cd scripts/enterprise-value-converter + python -m unittest tests/*.py diff --git a/.gitignore b/.gitignore index 20b01795..24be4a11 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,6 @@ +__pycache__ examples/ *.tgz charts/ -.idea/ \ No newline at end of file +.idea/ +*.code-workspace diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000..2118a205 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,28 @@ +repos: +# - repo: local +# hooks: +# - id: helm-unittest +# name: helm-unittest +# entry: scripts/hooks/helm-unittest.sh +# language: script +# files: ^stable/(enterprise|feeds)/templates/.*$ +- repo: local + hooks: + - id: readme-generator + name: readme-generator + entry: scripts/hooks/readme-generator.sh + language: script + files: ^stable/(enterprise|feeds|ecs-inventory)/values\.yaml$ +- repo: https://github.com/gitleaks/gitleaks + rev: v8.16.3 + hooks: + - id: gitleaks +- repo: https://github.com/jumanjihouse/pre-commit-hooks + rev: 3.0.0 + hooks: + - id: shellcheck +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.4.0 + hooks: + - id: end-of-file-fixer + - id: trailing-whitespace diff --git a/CODEOWNERS b/CODEOWNERS new file mode 100644 index 00000000..8262433a --- /dev/null +++ b/CODEOWNERS @@ -0,0 +1 @@ +* @anchore/devops diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 1aa66080..e3b516f1 100644 --- a/CONTRIBUTING.rst +++ 
b/CONTRIBUTING.rst @@ -10,15 +10,15 @@ way to contribute to the Anchore project. The process is to certify the below DCO 1.1 text :: - + Developer's Certificate of Origin 1.1 - + By making a contribution to this project, I certify that: - + (a) The contribution was created in whole or in part by me and I have the right to submit it under the open source license indicated in the file; or - + (b) The contribution is based upon previous work that, to the best of my knowledge, is covered under an appropriate open source license and I have the right under that license to submit that @@ -26,11 +26,11 @@ The process is to certify the below DCO 1.1 text by me, under the same open source license (unless I am permitted to submit under a different license), as indicated in the file; or - + (c) The contribution was provided directly to me by some other person who certified (a), (b) or (c) and I have not modified it. - + (d) I understand and agree that this project and the contribution are public and that a record of the contribution (including all personal information I submit with it, including my sign-off) is diff --git a/README.md b/README.md index 3e04d2e6..5e291ccf 100644 --- a/README.md +++ b/README.md @@ -1,26 +1,68 @@ -# Anchore Charts +# Anchore Helm Charts -A collection of anchore charts for tooling and integrations. The charts in this repository are available from the Anchore Charts Repository at: +This repository contains Helm charts for deploying [Anchore](https://www.anchore.com/) software on Kubernetes. -https://charts.anchore.io +## Prerequisites -## Installing Charts -``` -$ helm repo add anchore https://charts.anchore.io -$ helm search repo anchore -$ helm install my-release anchore/ +- [Helm](https://helm.sh/) (>=3.8) - Helm is a package manager for Kubernetes that makes it easy to install and manage applications on your cluster. 
+- [Kubernetes](https://kubernetes.io/) (>=1.23) - Kubernetes is an open-source container orchestration platform that is required to use Helm charts. +- [pre-commit](https://pre-commit.com/) (>=3.4) - pre-commit is a tool that is used to ensure that all commits to the repository are properly formatted. It is not required to use the charts in this repository, but it is recommended to install it to ensure that your commits are properly formatted. + +## Installation + +To use the charts in this repository, you will need to add it to your Helm repositories list. You can do this using the `helm repo add` command: + +```bash +helm repo add anchore https://charts.anchore.io ``` +Once the repository has been added, you can use the `helm search` command to view a list of available charts: -## Contributing +```bash +helm search repo anchore +``` -All commits must be signed with the DCO as defined in [CONTRIBUTING](CONTRIBUTING.rst) +To install a chart, use the `helm install` command and specify the chart name and any required values: -In git this can be done using the '-s' flag on commit. +```bash +RELEASE_NAME="my-release" +CHART_NAME="anchore/enterprise" -To test changes made to this chart, you must also synchronize the dependencies of the chart itself. -For example, for anchore-engine: +helm install "$RELEASE_NAME" "$CHART_NAME" --values values.yaml ``` -helm dep up + +### Installing from source + +It can be useful when developing to install a chart directly from the source code. To do this you must first download all dependent charts, then you are able to install from the chart directory. + +```bash +RELEASE_NAME="my-release" +CHART_PATH="anchore-charts/stable/enterprise" + +git clone https://github.com/anchore/anchore-charts.git +cd "$CHART_PATH" +helm dependency up +helm install "$RELEASE_NAME" . --values values.yaml ``` -is needed. 
\ No newline at end of file + +## Configuration + +The charts in this repository include a number of configuration options that can be set using the `--values` flag when installing the chart. For a full list of configuration options, see the chart's `values.yaml` file. + +## Contributing + +We welcome contributions to the Anchore Helm charts repository. If you have a chart change that you would like to share, please submit a pull request with your change and any relevant documentation. + +All commits must be signed with the DCO as defined in [CONTRIBUTING](./CONTRIBUTING.rst). In git this can be done using the '-s' flag on commit. + +## Testing + +This project uses GitHub Actions and the [Helm Chart Testing](https://github.com/helm/chart-testing) tool to test chart changes. When a pull request is opened, the testing workflow will run to ensure that the charts are properly formatted and can be installed on a Kubernetes cluster. + +All charts are tested against a range of Kubernetes versions. This version range roughly tracks the supported versions available from the major cloud vendors and is close, but not exactly the same as, the Kubernetes support N-3 approach. + +We aim to have at least the .0 patches for the releases for predictability and stability of the tests so that they do not have to change with each patch update. However, specific patches may be chosen for compatibility with the test harness (kindest/node) and if there is a specific bug fixed in a K8s release that has material impact on the results of a chart test. + +## Support + +If you have any questions or need assistance with the charts in this repository, please visit the [Anchore documentation](https://docs.anchore.com/) or contact the Anchore support team through the [Anchore support site](https://support.anchore.com/hc/en-us). 
diff --git a/RELEASE.md b/RELEASE.md new file mode 100644 index 00000000..ff778203 --- /dev/null +++ b/RELEASE.md @@ -0,0 +1,88 @@ +# Releasing Anchore Helm Charts + +In our Helm chart release strategy we have developed a pipeline to accommodate current and non-current versions of our enterprise software. To accomplish this, we are utilizing multiple release branches with distinct release pipelines. The `release--` branching strategy involves a release process that is dedicated to the ongoing support of non-current, but still supported, Enterprise platform versions. The `main` branch is dedicated to the ongoing support of current & future Enterprise platform versions. + +All release processes are controlled via CI using GitHub actions. Charts are linted and released using the official Helm [chart-testing](https://github.com/helm/chart-testing) and [chart-releaser](https://github.com/helm/chart-releaser) tools. + +## Release Documentation + +*Note: Ensure that Helm and GitHub credentials are configured appropriately for successful execution of the release process.* + +### Release Process for Current Charts + +1. **Create a Branch:** + - Start by creating a new branch for your changes off of the `main` branch. + + ```bash + git checkout main + git pull origin main + git checkout -b feature-update + ``` + +2. **Make and Commit Changes:** + - Implement your changes in the branch. + - Ensure Helm unit tests are updated and passing. + - Commit your changes. + - Push your branch to GitHub + + ```bash + git commit -sm "feat: implement updates" + helm unittest . + git push origin feature-update + ``` + +3. **Create Pull Request:** + - Open a pull request against the `main` branch on GitHub. + - Provide a descriptive title and description for the changes. + +4. **GitHub Actions and Chart Releasing:** + - Once the pull request is merged, GitHub Actions will automatically trigger the `chart-releaser-action`. + - This action will create the Helm release for the updated chart. 
+ +### Release Process for Non-Current Charts + +1. **Branching:** + - Create a new release branch off the latest v1.x.x tag named `release-enterprise-1.x.x`. + - Push the release branch to GitHub. + - Create a new branch off of the `release-enterprise-1.x.x` branch using a meaningful name for your changes. + - Example: + + ```bash + git checkout v1.0.0 + git checkout -b release-enterprise-1.x.x + git push origin release-enterprise-1.x.x + git checkout -b enterprise-1.x.x-feature-xyz + ``` + +2. **Make and Commit Changes:** + - Make necessary changes in your branch. + - Ensure that any affected Helm unit tests are updated and passing. + - Commit your changes. + - Example: + + ```bash + git commit -sm "feat: update something" + helm unittest . + git push origin enterprise-1.x.x-feature-xyz + ``` + +3. **Create Pull Request:** + - Open a pull request against the `release-enterprise-1.x.x` branch on GitHub. + - Provide a concise and informative title and description for your changes. + +4. **GitHub Actions and Chart Releasing:** + - Upon merging the pull request, a GitHub Action (`chart-releaser-action`) will be triggered automatically. + - The action will create the Helm release for the updated chart. + +5. **Managing GitHub Releases Page:** + - Navigate to the GitHub Releases page. + - Locate the latest release associated with the `enterprise-1.x.x` branch and make sure its not marked as latest. If it is, manually mark the actual latest release as 'latest.' + - Note: This step is crucial to distinguish the actual latest release from patch updates. + +## Chart Distribution + +Our Helm charts are distributed via GitHub Pages and managed using the `gh-pages` branch in this repository. The chart repository is available at . The `chart-releaser-action` will automatically perform the following actions when a PR is merged to the `main` OR `release-*` branches: + +- Create a GitHub tag & release for all changed charts, using `-` as the tag name. 
+- Package the chart and upload the created tarball to the corresponding GitHub release page. +- Updates the `chart.yaml` file in the `gh-pages` branch with the latest chart version & package location. diff --git a/build.sh b/build.sh index 398a6d46..31a082f2 100755 --- a/build.sh +++ b/build.sh @@ -32,4 +32,3 @@ if [ "${do_push}" == "true" ]; then else echo "Skipping push" fi - diff --git a/ct-config.yaml b/ct-config.yaml index bce61731..bb18e351 100644 --- a/ct-config.yaml +++ b/ct-config.yaml @@ -3,4 +3,7 @@ target-branch: main chart-dirs: - stable chart-repos: - - bitnami=https://charts.bitnami.com/bitnami \ No newline at end of file + - anchore=https://charts.anchore.io/stable + - bitnami=https://charts.bitnami.com/bitnami +namespace: anchore +release-label: anchore diff --git a/scripts/enterprise-value-converter/Dockerfile b/scripts/enterprise-value-converter/Dockerfile new file mode 100644 index 00000000..f5357ba5 --- /dev/null +++ b/scripts/enterprise-value-converter/Dockerfile @@ -0,0 +1,14 @@ +# python v3.11.5 +FROM python:3@sha256:cc7372fe4746ca323f18c6bd0d45dadf22d192756abc5f73e39f9c7f10cba5aa + +WORKDIR /app + +COPY helpers.py /app/ +COPY convert.py /app/ +COPY mappings.py /app/ + +RUN pip install argparse pyyaml + +ENTRYPOINT ["python3", "convert.py"] + +CMD ["-e", "values.yaml"] diff --git a/scripts/enterprise-value-converter/README.md b/scripts/enterprise-value-converter/README.md new file mode 100644 index 00000000..bfe9ba91 --- /dev/null +++ b/scripts/enterprise-value-converter/README.md @@ -0,0 +1,52 @@ +# Anchore Engine to Enterprise Helm Chart Value File Converter + +This script converts the values file of Anchore Engine to the values file format suitable for the Anchore Enterprise Helm chart. + +## Prerequisites + +- Docker: Make sure you have Docker installed on your machine. + +## Usage + +1. 
**The Docker Image**: + To build the docker image yourself, from the `scripts/enterprise-value-converter` directory, build the Docker image using the following command: + + ```bash + docker build -t script-container . + ``` + + Alternatively, a docker image is available at `docker.io/anchore/enterprise-helm-migrator:latest` + +2. **Run the Docker Container**: + + Run the Docker container with the following command. Change the name of the file as needed: + + ```bash + export VALUES_FILE_NAME=my-values-file.yaml + docker run -v ${PWD}:/tmp -v ${PWD}/${VALUES_FILE_NAME}:/app/${VALUES_FILE_NAME} docker.io/anchore/enterprise-helm-migrator:latest -e /app/${VALUES_FILE_NAME} -d /tmp/output + ``` + + This command mounts a local volume to store the output files and mounts the input file to be converted, and passes it using the `-e` flag. + +3. **Retrieve Output**: + + After running the Docker container, the converted Helm chart values file will be available in the `${PWD}/output` directory on your local machine. + +## Running tests + +To run the unit tests, run the following command from the `scripts/enterprise-value-converter` directory: + +```bash +pip install argparse pyyaml +python -m unittest tests/*.py +``` + +## Important Note + +Please ensure that you have reviewed and understood the content of the input file before running the conversion. The script provided is specifically tailored to convert Anchore Engine values files to the format expected by the Anchore Enterprise Helm chart. + +## Disclaimer + +This script is provided as-is and is intended to help reduce the friction of converting from anchore-engine to enterprise. It is your responsibility to ensure that any modifications or usage of the script align with your requirements and best practices. + +For any issues or suggestions related to the script or Docker image, feel free to create an issue or pull request in this repository. 
# NOTE(review): this span of the patch introduces two new Python files
# (convert.py and helpers.py) plus the head of mappings.py; each file's
# content is reconstructed below under its own banner comment.

# --- scripts/enterprise-value-converter/convert.py --------------------------
# Entry point for the enterprise value converter script.
# This script is used to convert the values files from the anchore-engine
# chart to the enterprise values files.

import sys

# One-shot utility: keep the workspace clean by skipping .pyc generation.
sys.dont_write_bytecode = True

import argparse

from helpers import convert_values_file

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Ingests one values files, changes the keys based on a declared "
            "map, then spits out a different values file"
        )
    )
    parser.add_argument(
        "-e", "--engine-file",
        type=str,
        help="Path to the original values file being ingested",
        default=""
    )
    parser.add_argument(
        "-d", "--results-dir",
        type=str,
        help="directory to put resulting files in",
        default="enterprise-values"
    )

    args = parser.parse_args()
    convert_values_file(file=args.engine_file, results_dir=args.results_dir)


# --- scripts/enterprise-value-converter/helpers.py --------------------------

import copy
import os
import pathlib
import shutil

import yaml

from mappings import (
    KEYS_WITHOUT_CHANGES,
    KUBERNETES_KEYS,
    TOP_LEVEL_MAPPING,
    FULL_CHANGE_KEY_MAPPING,
    LEVEL_TWO_CHANGE_KEY_MAPPING,
    LEVEL_THREE_CHANGE_KEY_MAPPING,
    DEPENDENCY_CHARTS,
    ENTERPRISE_ENV_VAR_MAPPING,
    FEEDS_ENV_VAR_MAPPING,
    DEPRECATED_KEYS,
    CHECK_LAST,
    POST_PROCESSING,
)


def represent_block_scalar(dumper, data):
    """PyYAML representer: emit multi-line strings as literal blocks (|)
    and everything else as double-quoted scalars."""
    style = "|" if "\n" in data else '"'
    return dumper.represent_scalar("tag:yaml.org,2002:str", data, style=style)


def convert_to_str(env_var):
    """Normalize one extraEnv entry so its value is a string.

    For a dict entry ({"name": ..., "value": ...}) the "value" field is
    stringified in place and the dict is returned; for any other entry the
    stringified entry is returned.

    Bug fix: the original returned None for dict input, forcing callers to
    compensate with `convert_to_str(x) or x`.  It now always returns the
    (possibly mutated) entry, which is backward compatible with that idiom.
    """
    if isinstance(env_var, dict):
        if not isinstance(env_var.get('value'), str):
            env_var['value'] = str(env_var.get('value'))
        return env_var
    return str(env_var)


def convert_values_file(file, results_dir):
    """Convert an anchore-engine values file into an enterprise values file.

    Writes the intermediate dot-string dump (dotstring.txt) and the final
    ``enterprise.<original name>`` file into ``results_dir``, which is wiped
    and recreated first.
    """
    file_name = os.path.basename(file)
    prep_dir(path=results_dir, clean=True)

    with open(file, 'r') as content:
        parsed_data = yaml.safe_load(content)

    dot_string_dict = dict_keys_to_dot_string(parsed_data)
    write_to_file(
        data="\n".join(f"{key} = {val}" for key, val in dot_string_dict.items()),
        output_file=os.path.join(results_dir, "dotstring.txt"),
        write_mode="w",
    )

    enterprise_chart_values_dict, enterprise_chart_env_var_dict = replace_keys_with_mappings(dot_string_dict, results_dir)

    for key, val in enterprise_chart_env_var_dict.items():
        # Every env var value must be a string before it is dumped as YAML.
        if isinstance(val, list):
            for index, env_var in enumerate(val):
                val[index] = convert_to_str(env_var) or env_var
        elif isinstance(val, dict):
            for index, env_var in enumerate(val.get("extraEnv", [])):
                val["extraEnv"][index] = convert_to_str(env_var) or env_var

        # Fold the environment variables into enterprise_chart_values_dict so
        # a single dictionary is dumped at the end.
        if key not in enterprise_chart_values_dict:
            enterprise_chart_values_dict[key] = type(val)()
        if isinstance(val, list):
            enterprise_chart_values_dict[key] = enterprise_chart_values_dict[key] + val
        elif isinstance(val, dict):
            enterprise_chart_values_dict[key] = enterprise_chart_values_dict.get(key, {})
            enterprise_chart_values_dict[key]["extraEnv"] = enterprise_chart_values_dict[key].get("extraEnv", [])
            enterprise_chart_values_dict[key]["extraEnv"] = enterprise_chart_values_dict[key]["extraEnv"] + val.get("extraEnv", [])

    # For the current bitnami postgres chart, if your user is specifically the
    # 'postgres' admin user, you need to override
    # global.postgresql.auth.postgresPassword as well.
    postgres_auth = enterprise_chart_values_dict.get('postgresql', {}).get('auth', {})
    if postgres_auth.get('username') == 'postgres' and postgres_auth.get('password'):
        enterprise_chart_values_dict['postgresql']['auth']['postgresPassword'] = postgres_auth['password']

    yaml.add_representer(str, represent_block_scalar)
    yaml_data = yaml.dump(enterprise_chart_values_dict, default_flow_style=False)
    write_to_file(
        data=yaml_data,
        output_file=os.path.join(results_dir, f"enterprise.{file_name}"),
        write_mode="w",
    )


def write_to_file(data, output_file, write_mode='w'):
    """Write ``data`` to ``output_file``, creating parent dirs as needed.

    Returns the output path so callers can chain/log it.
    """
    file_parent_dir = pathlib.Path(output_file).parent
    prep_dir(file_parent_dir)
    with open(f"{output_file}", write_mode) as file:
        file.write(data)
    return f"{output_file}"


def prep_dir(path, clean=False):
    """Ensure ``path`` exists as a directory; wipe it first when ``clean``."""
    if clean:
        if pathlib.Path(path).is_dir():
            shutil.rmtree(path)
    if not pathlib.Path(path).is_dir():
        pathlib.Path(path).mkdir(parents=True, exist_ok=True)
    return path


def dict_keys_to_dot_string(dictionary, prefix=''):
    """Flatten a nested mapping into {'a.b.c': leaf_value, ...}.

    Empty dicts are kept as leaf values (``bool(value)`` guard) so keys like
    ``foo.annotations: {}`` survive the round trip.
    """
    result = {}
    for key, value in dictionary.items():
        full_key = f'{prefix}.{key}' if prefix else key
        if isinstance(value, dict) and bool(value):
            result.update(dict_keys_to_dot_string(value, full_key))
        else:
            result[full_key] = value
    return result


def replace_keys_with_mappings(dot_string_dict, results_dir):
    """Map engine dot-string keys onto the enterprise chart layout.

    Returns a tuple of:
      * the nested dict used to build the new values file, and
      * a dict of extraEnv entries, keyed by service name (or "extraEnv" for
        global ones), for keys that became environment variables.

    Deprecated / unmatched keys are reported in log files under
    ``<results_dir>/logs``.
    """
    result = {}
    env_var_results = {}
    # No key overlap between the two tables, so merge order is irrelevant.
    env_var_mapping = {**ENTERPRISE_ENV_VAR_MAPPING, **FEEDS_ENV_VAR_MAPPING}
    logs_dir = os.path.join(results_dir, "logs")

    if not dot_string_dict.get("postgresql.postgresUser"):
        write_to_file(
            "setting postgres user as anchoreengine as one was not set and this value was changed in enterprise.\n",
            os.path.join(logs_dir, "info.log"),
            "a",
        )
        dot_string_dict["postgresql.postgresUser"] = "anchoreengine"

    if not dot_string_dict.get("anchoreGlobal.hashedPasswords"):
        write_to_file(
            "hashedPasswords is not currently used. You should _really_ consider using it. Please see docs on how to migrate to hashed passwords.\n",
            os.path.join(logs_dir, "warning.log"),
            "a",
        )
        dot_string_dict["anchoreGlobal.hashedPasswords"] = False

    for dotstring_key, val in dot_string_dict.items():
        keys = dotstring_key.split('.')

        if DEPRECATED_KEYS.get(dotstring_key):
            write_to_file(f"{dotstring_key}: no longer used\n", os.path.join(logs_dir, "warning.log"), "a")
            continue

        # serviceName.service.annotations / labels: collapse the trailing
        # segments back into one dotted map key so e.g.
        # a.service.annotations.x.y -> a.service.annotations: {"x.y": val}.
        if len(keys) > 2 and keys[2] in ['annotations', 'labels']:
            if val != {}:
                val = {'.'.join(keys[3:]): val}
            keys = keys[:3]
        # serviceName.annotations / labels / nodeSelector / deploymentAnnotations
        elif len(keys) > 1 and keys[1] in ['annotations', 'labels', 'nodeSelector', 'deploymentAnnotations']:
            if val != {}:
                val = {'.'.join(keys[2:]): val}
            keys = keys[:2]

        update_result = False

        if dotstring_key in POST_PROCESSING:
            pp_val = POST_PROCESSING[dotstring_key]
            action = pp_val.get("action")
            if action == "split_value":
                # One engine value (e.g. "repo:tag") becomes several keys.
                delimiter = pp_val.get("split_on")
                combined_dict = dict(zip(pp_val.get("new_keys"), val.split(delimiter)))
                for new_key, new_val in combined_dict.items():
                    result = merge_dicts(result, create_dict_entry(new_key, new_val))
                continue
            elif action == "merge":
                # Several engine values are joined into one enterprise value.
                # Bug fix: missing merge_keys entries produced None and made
                # ":".join raise TypeError; they are now skipped.
                parts = [dot_string_dict.get(merge_key) for merge_key in pp_val.get("merge_keys")]
                merged_val = ":".join(str(part) for part in parts if part is not None)
                result = merge_dicts(result, create_dict_entry(pp_val.get("new_key"), merged_val))
                continue
            elif action == "duplicate":
                # Same value copied to several destination keys.
                for new_key in pp_val.get("new_keys"):
                    result = merge_dicts(result, create_dict_entry(new_key, copy.deepcopy(val)))
                continue
            elif action == "key_addition":
                # Each entry is a (key, value) pair; the sentinel "default"
                # keeps the original value.
                for new_key, new_val in pp_val.get("new_keys"):
                    if new_val == "default":
                        new_val = val
                    result = merge_dicts(result, create_dict_entry(new_key, new_val))
                continue

        # Exact-key replacement first, then prefix replacements of decreasing
        # specificity (3 segments, 2 segments, top-level + k8s key).
        if FULL_CHANGE_KEY_MAPPING.get(dotstring_key):
            dotstring_key = FULL_CHANGE_KEY_MAPPING.get(dotstring_key)
            update_result = True
        elif len(keys) > 1:
            level_three_replacement = False
            if len(keys) > 2:
                level_three_replacement = LEVEL_THREE_CHANGE_KEY_MAPPING.get(".".join(keys[:3]), False)
            level_two_replacement = LEVEL_TWO_CHANGE_KEY_MAPPING.get(".".join(keys[:2]), False)
            top_level_key = TOP_LEVEL_MAPPING.get(keys[0], False)

            if level_three_replacement:
                dotstring_key = create_new_dotstring(keys=keys, dotstring=level_three_replacement, level=3)
                update_result = True
            elif level_two_replacement:
                dotstring_key = create_new_dotstring(keys=keys, dotstring=level_two_replacement, level=2)
                update_result = True
            elif top_level_key and keys[1] in KUBERNETES_KEYS:
                keys[0] = top_level_key
                dotstring_key = ".".join(keys)
                update_result = True

        if not update_result:
            if env_var_mapping.get(dotstring_key):
                # Key became an environment variable; route it into
                # env_var_results under its service (or globally).
                extra_environment_variable = env_var_mapping.get(dotstring_key)

                environment_variable_name = extra_environment_variable.split(".")[-1]
                service_name = ""
                if len(extra_environment_variable.split(".")) > 1:
                    service_name = extra_environment_variable.split(".")[0]

                write_to_file(
                    f"{dotstring_key} is now an environment variable: {environment_variable_name}\n",
                    os.path.join(logs_dir, "alert.log"),
                    "a",
                )

                env_dict = {"name": environment_variable_name, "value": val}

                if service_name != "":
                    env_var_results[service_name] = env_var_results.get(service_name, {})
                    if env_var_results[service_name].get("extraEnv"):
                        env_var_results[service_name]["extraEnv"].append(env_dict)
                    else:
                        env_var_results[service_name]["extraEnv"] = [env_dict]
                else:
                    env_var_results["extraEnv"] = env_var_results.get("extraEnv", [])
                    env_var_results["extraEnv"].append(env_dict)
                continue

            elif keys[0] in KEYS_WITHOUT_CHANGES:
                write_to_file(
                    f"{dotstring_key}: being carried over directly because there should be no changes\n",
                    os.path.join(logs_dir, "info.log"),
                    "a",
                )
                update_result = True
            elif DEPENDENCY_CHARTS.get(keys[0]):
                new_dep_key = DEPENDENCY_CHARTS.get(keys[0])
                write_to_file(
                    f"{dotstring_key}: {keys[0]} changed to {new_dep_key} but inner keys should be checked.\n",
                    os.path.join(logs_dir, "dependency-chart-alert.log"),
                    "a",
                )
                keys[0] = new_dep_key
                dotstring_key = ".".join(keys)
                update_result = True
            elif keys[0] in CHECK_LAST:
                # e.g. anchoreGlobal.foo -> foo
                keys.pop(0)
                dotstring_key = ".".join(keys)
                update_result = True

        if update_result:
            result = merge_dicts(result, create_dict_entry(dotstring_key, val))
        else:
            # Nothing matched: record the key so a human can follow up.
            # (Removed the always-True `errored` flag; this branch is simply
            # the fall-through case.)
            if dotstring_key.split('.')[0] in DEPRECATED_KEYS:
                message = f"{dotstring_key}: not found. likely deprecated.\n"
            else:
                message = f"{dotstring_key}: not found.\n"
            write_to_file(message, os.path.join(logs_dir, "error.log"), "a")
    return result, env_var_results


def create_new_dotstring(keys: list, dotstring: str, level: int) -> str:
    """Replace the first ``level`` segments of ``keys`` with ``dotstring``."""
    new_keys = dotstring.split(".")
    new_keys.extend(keys[level:])
    return ".".join(new_keys)


def create_dict_entry(dotstring, value):
    """Expand 'a.b.c' + value into {'a': {'b': {'c': value}}}."""
    result = {}
    current_dict = result
    keys = dotstring.split('.')

    for index, key in enumerate(keys):
        if index == len(keys) - 1:
            current_dict[key] = value
        else:
            # Creates the key with an empty map as a value because there's
            # more to come.
            current_dict[key] = {}
            current_dict = current_dict[key]
    return result


def merge_dicts(dict1, dict2):
    """Recursively merge dict2 into a shallow copy of dict1.

    Nested dicts are merged; any other collision is won by dict2.
    """
    merged_dict = dict1.copy()

    for key, value in dict2.items():
        if key in merged_dict and isinstance(merged_dict[key], dict) and isinstance(value, dict):
            merged_dict[key] = merge_dicts(merged_dict[key], value)
        else:
            merged_dict[key] = value

    return merged_dict


# --- scripts/enterprise-value-converter/mappings.py (head) ------------------

# If we see these as the first key segment, carry the key over unchanged.
KEYS_WITHOUT_CHANGES = {
    "cloudsql",
    "ingress"
}
# --- scripts/enterprise-value-converter/mappings.py (continued) -------------
# Lookup tables consumed by helpers.replace_keys_with_mappings().  All tables
# are read via .get()/membership tests only, so ordering is cosmetic.

# Checked last: if nothing else matched and the key starts with one of these
# segments, the segment is dropped, e.g. anchoreGlobal.something -> something.
CHECK_LAST = {
    "anchoreEnterpriseGlobal",
    "anchoreGlobal",
}

# First-level keys belonging to dependency charts.  If the first segment is in
# this table and no other mapping matched, it is renamed and the change is
# logged for manual review of the inner keys.
DEPENDENCY_CHARTS = {
    "anchore-feeds-db": "feeds-db",
    "anchore-feeds-gem-db": "gem-db",
    "anchore-ui-redis": "ui-redis",
    "postgresql": "postgresql",
    "ui-redis": "ui-redis",
}

# Generic Kubernetes-style second-level keys: when the second segment is one
# of these, the first segment is rewritten via TOP_LEVEL_MAPPING.
KUBERNETES_KEYS = {
    "affinity",
    "annotations",
    "deploymentAnnotations",
    "extraEnv",
    "labels",
    "nodeSelector",
    "replicaCount",
    "resources",
    "service",
    "tolerations",
    "serviceAccountName",
}

# Engine top-level section name -> enterprise top-level section name.
TOP_LEVEL_MAPPING = {
    "anchore-feeds-db": "feeds.feeds-db",
    "anchore-feeds-gem-db": "feeds.gem-db",
    "anchore-ui-redis": "ui-redis",
    "anchoreAnalyzer": "analyzer",
    "anchoreApi": "api",
    "anchoreCatalog": "catalog",
    "anchoreEnterpriseEngineUpgradeJob": "upgradeJob",
    "anchoreEnterpriseFeeds": "feeds",
    "anchoreEnterpriseFeedsUpgradeJob": "feeds.feedsUpgradeJob",
    "anchoreEnterpriseNotifications": "notifications",
    "anchoreEnterpriseRbac": "rbacManager",
    "anchoreEnterpriseReports": "reports",
    "anchoreEnterpriseUi": "ui",
    "anchorePolicyEngine": "policyEngine",
    "anchoreSimpleQueue": "simpleQueue",
    "ingress": "ingress",
}

# Two-segment prefix replacements: "a.b" -> new dotted prefix; any remaining
# segments of the original key are appended unchanged.
LEVEL_TWO_CHANGE_KEY_MAPPING = {
    "anchore-feeds-db.externalEndpoint": "feeds.feeds-db.externalEndpoint",
    "anchoreEnterpriseUi.customLinks": "anchoreConfig.ui.custom_links",
    "anchoreEnterpriseUi.enableAddRepositories": "anchoreConfig.ui.enable_add_repositories",
    "anchoreEnterpriseFeeds.url": "feeds.url",
    # TODO(review): configFile handling (incl. malware settings) still needs
    # test coverage.
    "anchoreAnalyzer.configFile": "anchoreConfig.analyzer.configFile",
    "anchoreApi.external": "anchoreConfig.apiext.external",
    "anchoreCatalog.analysis_archive": "anchoreConfig.catalog.analysis_archive",
    "anchoreCatalog.cycleTimers": "anchoreConfig.catalog.cycle_timers",
    "anchoreCatalog.events": "anchoreConfig.catalog.event_log",
    "anchoreCatalog.object_store": "anchoreConfig.catalog.object_store",
    "anchoreEnterpriseEngineUpgradeJob.enabled": "upgradeJob.enabled",
    "anchoreEnterpriseFeeds.cycleTimers": "feeds.anchoreConfig.feeds.cycle_timers",
    "anchoreEnterpriseFeeds.dbConfig": "feeds.anchoreConfig.dbConfig",
    "anchoreEnterpriseFeeds.debianExtraReleases": "feeds.anchoreConfig.feeds.drivers.debian.releases",
    "anchoreEnterpriseFeeds.gemDriverEnabled": "feeds.anchoreConfig.feeds.drivers.gem.enabled",
    "anchoreEnterpriseFeeds.githubDriverEnabled": "feeds.anchoreConfig.feeds.drivers.github.enabled",
    "anchoreEnterpriseFeeds.githubDriverToken": "feeds.anchoreConfig.feeds.drivers.github.token",
    "anchoreEnterpriseFeeds.msrcWhitelist": "feeds.anchoreConfig.feeds.drivers.msrc.whitelist",
    "anchoreEnterpriseFeeds.msrcDriverEnabled": "feeds.anchoreConfig.feeds.drivers.msrc.enabled",
    "anchoreEnterpriseFeeds.npmDriverEnabled": "feeds.anchoreConfig.feeds.drivers.npm.enabled",
    "anchoreEnterpriseFeeds.persistence": "feeds.persistence",
    "anchoreEnterpriseFeeds.ubuntuExtraReleases": "feeds.anchoreConfig.feeds.drivers.ubuntu.releases",
    "anchoreEnterpriseFeedsUpgradeJob.enabled": "feeds.feedsUpgradeJob.enabled",
    "anchoreEnterpriseNotifications.cycleTimers": "anchoreConfig.notifications.cycle_timers",
    "anchoreEnterpriseReports.cycleTimers": "anchoreConfig.reports_worker.cycle_timers",
    "anchoreEnterpriseUi.appDBConfig": "anchoreConfig.ui.appdb_config",
    "anchoreEnterpriseUi.authenticationLock": "anchoreConfig.ui.authentication_lock",
    "anchoreEnterpriseUi.existingSecretName": "ui.existingSecretName",
    "anchoreEnterpriseUi.image": "ui.image",
    "anchoreEnterpriseUi.imagePullPolicy": "ui.imagePullPolicy",
    "anchoreEnterpriseUi.ldapsRootCaCertName": "ui.ldapsRootCaCertName",
    "anchoreGlobal.dbConfig": "anchoreConfig.database",
    "anchoreGlobal.internalServicesSsl": "anchoreConfig.internalServicesSSL",
    "anchoreGlobal.policyBundles": "anchoreConfig.policyBundles",
    "anchoreGlobal.webhooks": "anchoreConfig.webhooks",
    "anchorePolicyEngine.cycleTimers": "anchoreConfig.policy_engine.cycle_timers",
    "anchorePolicyEngine.overrideFeedsToUpstream": "anchoreConfig.policy_engine.overrideFeedsToUpstream",
    "postgresql.externalEndpoint": "postgresql.externalEndpoint",
    "postgresql.persistence": "postgresql.primary.persistence",
    "postgresql.extraEnv": "postgresql.primary.extraEnvVars",
    "anchore-feeds-db.extraEnv": "feeds.feeds-db.primary.extraEnvVars",
    "anchore-feeds-gem-db.extraEnv": "feeds.gem-db.primary.extraEnvVars",
    "anchore-feeds-gem-db.persistence": "feeds.gem-db.primary.persistence",
    "anchore-feeds-db.persistence": "feeds.feeds-db.primary.persistence",
    "anchoreEnterpriseRbac.managerResources": "rbacManager.resources",
}

# Three-segment prefix replacements (checked before the two-segment table).
LEVEL_THREE_CHANGE_KEY_MAPPING = {
    "anchore-feeds-db.persistence.resourcePolicy": "feeds.feeds-db.primary.persistence.resourcePolicy",
    "anchore-feeds-db.persistence.size": "feeds.feeds-db.primary.persistence.size",
    "anchoreAnalyzer.cycleTimers.image_analyzer": "anchoreConfig.analyzer.cycle_timers.image_analyzer",
    "anchoreGlobal.saml.secret": "anchoreConfig.keys.secret",
}

# Whole-key replacements: the complete original dotted key maps to the
# complete new dotted key.
FULL_CHANGE_KEY_MAPPING = {
    "fullnameOverride": "global.fullnameOverride",
    "nameOverride": "global.nameOverride",
    "postgresql.enabled": "postgresql.chartEnabled",
    "postgresql.postgresDatabase": "postgresql.auth.database",
    "postgresql.postgresPassword": "postgresql.auth.password",
    "postgresql.postgresUser": "postgresql.auth.username",
    "postgresql.postgresPort": "postgresql.primary.service.ports.postgresql",
    "postgresql.imageTag": "postgresql.image.tag",
    "anchore-feeds-db.imageTag": "feeds.feeds-db.image.tag",
    "anchore-feeds-gem-db.imageTag": "feeds.gem-db.image.tag",
    "anchore-feeds-db.enabled": "feeds.feeds-db.chartEnabled",
    "anchore-feeds-db.postgresDatabase": "feeds.feeds-db.auth.database",
    "anchore-feeds-db.postgresPassword": "feeds.feeds-db.auth.password",
    "anchore-feeds-db.postgresPort": "feeds.feeds-db.primary.service.ports.postgresql",
    "anchore-feeds-db.postgresUser": "feeds.feeds-db.auth.username",
    "anchore-feeds-gem-db.enabled": "feeds.gem-db.chartEnabled",
    "anchore-feeds-gem-db.externalEndpoint": "feeds.gem-db.externalEndpoint",
    "anchore-feeds-gem-db.postgresDatabase": "feeds.gem-db.auth.database",
    "anchore-feeds-gem-db.postgresPassword": "feeds.gem-db.auth.password",
    "anchore-feeds-gem-db.postgresPort": "feeds.gem-db.primary.service.ports.postgresql",
    "anchore-feeds-gem-db.postgresUser": "feeds.gem-db.auth.username",
    "anchoreAnalyzer.containerPort": "analyzer.service.port",
    "anchoreAnalyzer.enableHints": "anchoreConfig.analyzer.enable_hints",
    "anchoreAnalyzer.layerCacheMaxGigabytes": "anchoreConfig.analyzer.layer_cache_max_gigabytes",
    "anchoreApi.external.use_tls": "anchoreConfig.apiext.external.useTLS",
    "anchoreCatalog.downAnalyzerTaskRequeue": "anchoreConfig.catalog.down_analyzer_task_requeue",
    "anchoreCatalog.runtimeInventory.imageTTLDays": "anchoreConfig.catalog.runtime_inventory.image_ttl_days",
    "anchoreEnterpriseFeeds.enabled": "feeds.chartEnabled",
    "anchoreEnterpriseFeeds.nvdDriverApiKey": "feeds.anchoreConfig.feeds.drivers.nvdv2.api_key",
    "anchoreEnterpriseNotifications.uiUrl": "anchoreConfig.notifications.ui_url",
    "anchoreEnterpriseRbac.service.managerPort": "rbacManager.service.port",
    "anchoreEnterpriseRbac.service.type": "rbacManager.service.type",
    "anchoreEnterpriseReports.dataEgressWindow": "anchoreConfig.reports_worker.data_egress_window",
    "anchoreEnterpriseReports.dataLoadMaxWorkers": "anchoreConfig.reports_worker.data_load_max_workers",
    "anchoreEnterpriseReports.dataRefreshMaxWorkers": "anchoreConfig.reports_worker.data_refresh_max_workers",
    "anchoreEnterpriseReports.enableDataEgress": "anchoreConfig.reports_worker.enable_data_egress",
    "anchoreEnterpriseReports.enableDataIngress": "anchoreConfig.reports_worker.enable_data_ingress",
    "anchoreEnterpriseReports.enableGraphiql": "anchoreConfig.reports.enable_graphiql",
    "anchoreEnterpriseReports.service.apiPort": "reports.service.port",
    "anchoreEnterpriseUi.enableProxy": "anchoreConfig.ui.enable_proxy",
    "anchoreEnterpriseUi.enableSharedLogin": "anchoreConfig.ui.enable_shared_login",
    "anchoreEnterpriseUi.enableSsl": "anchoreConfig.ui.enable_ssl",
    "anchoreEnterpriseUi.enrichInventoryView": "anchoreConfig.ui.enrich_inventory_view",
    "anchoreEnterpriseUi.forceWebsocket": "anchoreConfig.ui.force_websocket",
    "anchoreEnterpriseUi.logLevel": "anchoreConfig.ui.log_level",
    "anchoreEnterpriseUi.dbUser": "ui.dbUser",
    "anchoreEnterpriseUi.dbPass": "ui.dbPass",
    "anchoreEnterpriseUi.redisHost": "anchoreConfig.ui.redis_host",
    "anchoreEnterpriseUi.redisFlushdb": "anchoreConfig.ui.redis_flushdb",
    "anchoreGlobal.dbConfig.connectionPoolMaxOverflow": "anchoreConfig.database.db_pool_max_overflow",
    "anchoreGlobal.dbConfig.connectionPoolSize": "anchoreConfig.database.db_pool_size",
    "anchoreGlobal.dbConfig.sslRootCertName": "anchoreConfig.database.sslRootCertFileName",
    "anchoreGlobal.defaultAdminEmail": "anchoreConfig.default_admin_email",
    "anchoreGlobal.defaultAdminPassword": "anchoreConfig.default_admin_password",
    "anchoreGlobal.enableMetrics": "anchoreConfig.metrics.enabled",
    "anchoreGlobal.hashedPasswords": "anchoreConfig.user_authentication.hashed_passwords",
    "anchoreGlobal.internalServicesSsl.certSecretCertName": "anchoreConfig.internalServicesSSL.certSecretCertFileName",
    "anchoreGlobal.internalServicesSsl.certSecretKeyName": "anchoreConfig.internalServicesSSL.certSecretKeyFileName",
    "anchoreGlobal.logLevel": "anchoreConfig.log_level",
    "anchoreGlobal.metricsAuthDisabled": "anchoreConfig.metrics.auth_disabled",
    "anchoreGlobal.oauthEnabled": "anchoreConfig.user_authentication.oauth.enabled",
    "anchoreGlobal.oauthRefreshTokenExpirationSeconds": "anchoreConfig.user_authentication.oauth.refresh_token_expiration_seconds",
    "anchoreGlobal.oauthTokenExpirationSeconds": "anchoreConfig.user_authentication.oauth.default_token_expiration_seconds",
    "anchoreGlobal.saml.privateKeyName": "anchoreConfig.keys.privateKeyFileName",
    "anchoreGlobal.saml.publicKeyName": "anchoreConfig.keys.publicKeyFileName",
    "anchoreGlobal.serviceDir": "anchoreConfig.service_dir",
    "anchoreGlobal.ssoRequireExistingUsers": "anchoreConfig.user_authentication.sso_require_existing_users",
    "cloudsql.image.pullPolicy": "cloudsql.imagePullPolicy",
    "inject_secrets_via_env": "injectSecretsViaEnv",
    "ui-redis.enabled": "ui-redis.chartEnabled",
    "anchoreGlobal.allowECRUseIAMRole": "anchoreConfig.allow_awsecr_iam_auto",
}

# Engine keys that became environment variables on enterprise services.
# Values with a dotted prefix name the target service; bare names are global.
ENTERPRISE_ENV_VAR_MAPPING = {
    "anchoreAnalyzer.maxRequestThreads": "analyzer.ANCHORE_MAX_REQUEST_THREADS",
    "anchoreAnalyzer.enableOwnedPackageFiltering": "analyzer.ANCHORE_OWNED_PACKAGE_FILTERING_ENABLED",
    "anchoreApi.maxRequestThreads": "api.ANCHORE_MAX_REQUEST_THREADS",
    "anchoreCatalog.maxRequestThreads": "catalog.ANCHORE_MAX_REQUEST_THREADS",
    "anchoreCatalog.imageGCMaxWorkerThreads": "catalog.ANCHORE_CATALOG_IMAGE_GC_WORKERS",
    "anchoreEnterpriseNotifications.maxRequestThreads": "notifications.ANCHORE_MAX_REQUEST_THREADS",
    "anchoreEnterpriseReports.maxRequestThreads": "reports.ANCHORE_MAX_REQUEST_THREADS",
    "anchoreGlobal.clientConnectTimeout": "ANCHORE_GLOBAL_CLIENT_CONNECT_TIMEOUT",
    "anchoreGlobal.clientReadTimeout": "ANCHORE_GLOBAL_CLIENT_READ_TIMEOUT",
    "anchoreGlobal.maxCompressedImageSizeMB": "ANCHORE_MAX_COMPRESSED_IMAGE_SIZE_MB",
    "anchoreGlobal.serverRequestTimeout": "ANCHORE_GLOBAL_SERVER_REQUEST_TIMEOUT_SEC",
    "anchoreGlobal.syncGithub": "ANCHORE_FEEDS_GITHUB_ENABLED",
    "anchoreGlobal.syncPackages": "ANCHORE_FEEDS_PACKAGES_ENABLED",
    "anchoreGlobal.syncVulnerabilites": "ANCHORE_FEEDS_VULNERABILITIES_ENABLED",
    "anchoreGlobal.syncNvd": "ANCHORE_FEEDS_DRIVER_NVDV2_ENABLED",
    "anchoreGlobal.imageAnalyzeTimeoutSeconds": "ANCHORE_IMAGE_ANALYZE_TIMEOUT_SECONDS",
    "anchorePolicyEngine.cacheTTL": "policyEngine.ANCHORE_POLICY_EVAL_CACHE_TTL_SECONDS",
    "anchorePolicyEngine.enablePackageDbLoad": "policyEngine.ANCHORE_POLICY_ENGINE_ENABLE_PACKAGE_DB_LOAD",
    "anchorePolicyEngine.maxRequestThreads": "policyEngine.ANCHORE_MAX_REQUEST_THREADS",
    "anchoreSimpleQueue.maxRequestThreads": "simpleQueue.ANCHORE_MAX_REQUEST_THREADS",
    "anchoreEnterpriseReports.vulnerabilitiesByK8sNamespace": "ANCHORE_ENTERPRISE_REPORTS_VULNERABILITIES_BY_K8S_NAMESPACE",
    "anchoreEnterpriseReports.vulnerabilitiesByK8sContainer": "ANCHORE_ENTERPRISE_REPORTS_VULNERABILITIES_BY_K8S_CONTAINER",
    "anchoreEnterpriseReports.vulnerabilitiesByEcsContainer": "ANCHORE_ENTERPRISE_REPORTS_VULNERABILITIES_BY_ECS_CONTAINER",
}

# Engine keys that became environment variables on the feeds service.
FEEDS_ENV_VAR_MAPPING = {
    "anchoreEnterpriseFeeds.alpineDriverEnabled": "feeds.ANCHORE_FEEDS_DRIVER_ALPINE_ENABLED",
    "anchoreEnterpriseFeeds.amazonDriverEnabled": "feeds.ANCHORE_FEEDS_DRIVER_AMAZON_ENABLED",
    "anchoreEnterpriseFeeds.anchoreMatchExclusionsEnabled": "feeds.ANCHORE_FEEDS_DRIVER_MATCH_EXCLUSIONS",
    "anchoreEnterpriseFeeds.apiOnly": "feeds.ANCHORE_FEEDS_API_ONLY",
    "anchoreEnterpriseFeeds.chainguardDriverEnabled": "feeds.ANCHORE_FEEDS_DRIVER_CHAINGUARD_ENABLED",
    "anchoreEnterpriseFeeds.debianDriverEnabled": "feeds.ANCHORE_FEEDS_DRIVER_DEBIAN_ENABLED",
    "anchoreEnterpriseFeeds.grypeDriverEnabled": "feeds.ANCHORE_FEEDS_DRIVER_GRYPEDB_ENABLED",
    "anchoreEnterpriseFeeds.grypedbPersistProviderWorkspaces": "feeds.ANCHORE_FEEDS_GRYPEDB_PERSIST_WORKSPACE",
    "anchoreEnterpriseFeeds.grypedbPreloadEnabled": "feeds.ANCHORE_FEEDS_GRYPEDB_PRELOAD_ENABLED",
    "anchoreEnterpriseFeeds.grypedbPreloadWorkspaceArchivePath": "feeds.ANCHORE_FEEDS_GRYPEDB_PRELOAD_PATH",
    "anchoreEnterpriseFeeds.grypedbRestoreProviderWorkspaces": "feeds.ANCHORE_FEEDS_GRYPEDB_RESTORE_WORKSPACE",
    "anchoreEnterpriseFeeds.maxRequestThreads": "feeds.ANCHORE_MAX_REQUEST_THREADS",
    "anchoreEnterpriseFeeds.olDriverEnabled": "feeds.ANCHORE_FEEDS_DRIVER_OL_ENABLED",
    "anchoreEnterpriseFeeds.rhelDriverConcurrency": "feeds.ANCHORE_FEEDS_DRIVER_RHEL_CONCURRENCY",
    "anchoreEnterpriseFeeds.rhelDriverEnabled": "feeds.ANCHORE_FEEDS_DRIVER_RHEL_ENABLED",
    "anchoreEnterpriseFeeds.slesDriverEnabled": "feeds.ANCHORE_FEEDS_DRIVER_SLES_ENABLED",
    "anchoreEnterpriseFeeds.ubuntuDriverEnabled": "feeds.ANCHORE_FEEDS_DRIVER_UBUNTU_ENABLED",
    "anchoreEnterpriseFeeds.ubuntuDriverGitBranch": "feeds.ANCHORE_FEEDS_DRIVER_UBUNTU_BRANCH",
    "anchoreEnterpriseFeeds.ubuntuDriverGitUrl": "feeds.ANCHORE_FEEDS_DRIVER_UBUNTU_URL",
    "anchoreEnterpriseFeeds.wolfiDriverEnabled": "feeds.ANCHORE_FEEDS_DRIVER_WOLFI_ENABLED",
    "anchoreEnterpriseFeeds.nvdv2DriverEnabled": "feeds.ANCHORE_FEEDS_DRIVER_NVDV2_ENABLED",
}

# Keys that no longer exist in the new chart.  Non-"deprecated" values record
# what the fixed value was (informational only; only truthiness is checked).
DEPRECATED_KEYS = {
    "anchoreEngineUpgradeJob": "deprecated",
    "anchoreEnterpriseFeeds.nvdDriverEnabled": "deprecated",
    "anchoreEnterpriseFeeds.useNvdDriverApiKey": "deprecated",
    "anchoreEnterpriseGlobal.enabled": "deprecated",
    "anchoreEnterpriseNotifications.enabled": "deprecated",
    "anchoreEnterpriseRbac.enabled": "deprecated",
    "anchoreEnterpriseRbac.service.authPort": "8089",
    "anchoreEnterpriseReports.enabled": "deprecated",
    "anchoreEnterpriseUi.enabled": "deprecated",
    "anchoreGlobal.feedsConnectionTimeout": "3",
    "anchoreGlobal.feedsReadTimeout": "60",
    "anchoreGlobal.image": "deprecated",
    "anchoreGlobal.imagePullPolicy": "deprecated",
    "anchoreGlobal.imagePullSecretName": "deprecated",
    "anchoreGlobal.syncGrypeDB": "true",
    "anchoreGlobal.webhooksEnabled": "deprecated",
    "postgresql.persistence.resourcePolicy": "deprecated",
    "anchoreGlobal.saml.useExistingSecret": "deprecated",
    "anchoreEnterpriseReports.service.workerPort": "deprecated",
    "anchoreAnalyzer.concurrentTasksPerWorker": "deprecated",
}

# Keys needing a structural transform rather than a simple rename.  Supported
# actions: split_value (one value -> several keys), merge (several values ->
# one key), duplicate (one value -> several keys verbatim), key_addition
# (emit fixed (key, value) pairs; "default" keeps the original value).
POST_PROCESSING = {
    "postgresql.image": {
        "action": "split_value",
        "split_on": ":",
        "new_keys": ("postgresql.image.repository", "postgresql.image.tag"),
    },
    "anchore-feeds-db.image": {
        "action": "split_value",
        "split_on": ":",
        "new_keys": ("feeds.feeds-db.image.repository", "feeds.feeds-db.image.tag"),
    },
    "anchore-feeds-gem-db.image": {
        "action": "split_value",
        "split_on": ":",
        "new_keys": ("feeds.gem-db.image.repository", "feeds.gem-db.image.tag"),
    },
    "cloudsql.image.repository": {
        "action": "merge",
        "merge_keys": ("cloudsql.image.repository", "cloudsql.image.tag"),
        "new_key": "cloudsql.image",
    },
    "cloudsql.image.tag": {
        "action": "merge",
        "merge_keys": ("cloudsql.image.repository", "cloudsql.image.tag"),
        "new_key": "cloudsql.image",
    },
    "anchoreEnterpriseRbac.extraEnv": {
        "action": "duplicate",
        "new_keys": ["rbacManager.extraEnv"],
    },
    "anchoreEnterpriseGlobal.imagePullSecretName": {
        "action": "duplicate",
        "new_keys": ["imagePullSecretName", "feeds.imagePullSecretName"],
    },
    "anchoreEnterpriseFeeds.existingSecretName": {
        "action": "key_addition",
        "new_keys": [("feeds.existingSecretName", "default"), ("feeds.useExistingSecrets", True)],
    },
}
00000000..52337394 --- /dev/null +++ b/scripts/enterprise-value-converter/tests/test_anchoreAnalyzer_value_mapping.py @@ -0,0 +1,338 @@ +import os +import shutil +import unittest +from helpers import ( + replace_keys_with_mappings, +) + +class TestReplaceKeysWithMappingsAnalyzer(unittest.TestCase): + def setUp(self): + self.results_dir = "test_results_dir" + + def tearDown(self): + if os.path.exists(self.results_dir): + shutil.rmtree(self.results_dir) + + def test_anchoreAnalyzer_replicaCount_value(self): + dot_string_dict = { + "anchoreAnalyzer.replicaCount": 2, + } + expected_result = { 'postgresql': {'auth': {'username': 'anchoreengine'}}, 'anchoreConfig': {'user_authentication': {'hashed_passwords': False}}, + 'analyzer': { + 'replicaCount': 2 + } + } + result = replace_keys_with_mappings(dot_string_dict, self.results_dir) + self.assertEqual(result[0], expected_result) + + def test_anchoreAnalyzer_containerPort_value(self): + dot_string_dict = { + "anchoreAnalyzer.containerPort": 8084, + } + expected_result = { 'postgresql': {'auth': {'username': 'anchoreengine'}}, 'anchoreConfig': {'user_authentication': {'hashed_passwords': False}}, + 'analyzer': { + 'service': { + 'port': 8084 + } + } + } + result = replace_keys_with_mappings(dot_string_dict, self.results_dir) + self.assertEqual(result[0], expected_result) + + def test_anchoreAnalyzer_extraEnv_value(self): + dot_string_dict = { + "anchoreAnalyzer.extraEnv": [ + { + "name": "foo", + "value": "bar" + } + ] + } + expected_result = { 'postgresql': {'auth': {'username': 'anchoreengine'}}, 'anchoreConfig': {'user_authentication': {'hashed_passwords': False}}, + 'analyzer': { + 'extraEnv': [ + { + 'name': 'foo', + 'value': 'bar' + } + ] + } + } + result = replace_keys_with_mappings(dot_string_dict, self.results_dir) + self.assertEqual(result[0], expected_result) + + def test_anchoreAnalyzer_serviceAccountName_value(self): + dot_string_dict = { + "anchoreAnalyzer.serviceAccountName": "foo", + } + expected_result = { 
'postgresql': {'auth': {'username': 'anchoreengine'}}, 'anchoreConfig': {'user_authentication': {'hashed_passwords': False}}, + 'analyzer': { + 'serviceAccountName': 'foo' + } + } + result = replace_keys_with_mappings(dot_string_dict, self.results_dir) + self.assertEqual(result[0], expected_result) + + def test_anchoreAnalyzer_resources_value(self): + dot_string_dict = { + "anchoreAnalyzer.resources.limits.cpu": 1, + "anchoreAnalyzer.resources.limits.memory": "4G", + "anchoreAnalyzer.resources.requests.cpu": 1, + "anchoreAnalyzer.resources.requests.memory": "1G", + } + expected_result = { 'postgresql': {'auth': {'username': 'anchoreengine'}}, 'anchoreConfig': {'user_authentication': {'hashed_passwords': False}}, + 'analyzer': { + 'resources': { + 'limits': { + 'cpu': 1, + 'memory': '4G' + }, + 'requests': { + 'cpu': 1, + 'memory': '1G' + } + } + } + } + result = replace_keys_with_mappings(dot_string_dict, self.results_dir) + self.assertEqual(result[0], expected_result) + + def test_anchoreAnalyzer_labels_value(self): + dot_string_dict = { + "anchoreAnalyzer.labels.name": "foo", + "anchoreAnalyzer.labels.value": "bar", + "anchoreAnalyzer.labels.kubernetes.io/description": "baz", + } + expected_result = { 'postgresql': {'auth': {'username': 'anchoreengine'}}, 'anchoreConfig': {'user_authentication': {'hashed_passwords': False}}, + 'analyzer': { + 'labels': + { + 'name': 'foo', + 'value': 'bar', + 'kubernetes.io/description': 'baz' + } + } + } + result = replace_keys_with_mappings(dot_string_dict, self.results_dir) + self.assertEqual(result[0], expected_result) + + def test_anchoreAnalyzer_annotations_value(self): + dot_string_dict = { + "anchoreAnalyzer.annotations.name": "foo", + "anchoreAnalyzer.annotations.value": "bar", + "anchoreAnalyzer.annotations.kubernetes.io/description": "baz", + } + expected_result = { 'postgresql': {'auth': {'username': 'anchoreengine'}}, 'anchoreConfig': {'user_authentication': {'hashed_passwords': False}}, + 'analyzer': { + 
'annotations': + { + 'name': 'foo', + 'value': 'bar', + 'kubernetes.io/description': 'baz' + } + } + } + result = replace_keys_with_mappings(dot_string_dict, self.results_dir) + self.assertEqual(result[0], expected_result) + + def test_anchoreanalyzer_deploymentAnnotations_value(self): + dot_string_dict = { + "anchoreAnalyzer.deploymentAnnotations.name": "foo", + "anchoreAnalyzer.deploymentAnnotations.value": "bar", + } + expected_result = { 'postgresql': {'auth': {'username': 'anchoreengine'}}, 'anchoreConfig': {'user_authentication': {'hashed_passwords': False}}, + 'analyzer': { + 'deploymentAnnotations': { + 'name': 'foo', + 'value': 'bar' + } + } + } + result = replace_keys_with_mappings(dot_string_dict, self.results_dir) + self.assertEqual(result[0], expected_result) + + def test_anchoreAnalyzer_nodeSelector_value(self): + dot_string_dict = { + "anchoreAnalyzer.nodeSelector.name": "foo", + "anchoreAnalyzer.nodeSelector.value": "bar", + + } + expected_result = { 'postgresql': {'auth': {'username': 'anchoreengine'}}, 'anchoreConfig': {'user_authentication': {'hashed_passwords': False}}, + 'analyzer': { + 'nodeSelector': + { + 'name': 'foo', + 'value': 'bar' + } + + } + } + result = replace_keys_with_mappings(dot_string_dict, self.results_dir) + self.assertEqual(result[0], expected_result) + + def test_anchoreAnalyzer_tolerations_value(self): + dot_string_dict = { + "anchoreAnalyzer.tolerations": [ + { + "name": "foo", + "value": "bar" + } + ] + } + expected_result = { 'postgresql': {'auth': {'username': 'anchoreengine'}}, 'anchoreConfig': {'user_authentication': {'hashed_passwords': False}}, + 'analyzer': { + 'tolerations': [ + { + 'name': 'foo', + 'value': 'bar' + } + ] + } + } + result = replace_keys_with_mappings(dot_string_dict, self.results_dir) + self.assertEqual(result[0], expected_result) + + def test_anchoreAnalyzer_affinity_value(self): + dot_string_dict = { + "anchoreAnalyzer.affinity.name": "foo", + "anchoreAnalyzer.affinity.value": "bar", + } + 
expected_result = { 'postgresql': {'auth': {'username': 'anchoreengine'}}, 'anchoreConfig': {'user_authentication': {'hashed_passwords': False}}, + 'analyzer': { + 'affinity': { + 'name': 'foo', + 'value': 'bar' + } + } + } + result = replace_keys_with_mappings(dot_string_dict, self.results_dir) + self.assertEqual(result[0], expected_result) + + def test_anchoreAnalyzer_cycleTimers_image_analyzer_value(self): + dot_string_dict = { + "anchoreAnalyzer.cycleTimers.image_analyzer": 1, + } + expected_result = { + 'postgresql': {'auth': {'username': 'anchoreengine'}}, + 'anchoreConfig': { + 'user_authentication': {'hashed_passwords': False}, + 'analyzer': { + 'cycle_timers': { + 'image_analyzer': 1 + } + } + } + + } + result = replace_keys_with_mappings(dot_string_dict, self.results_dir) + self.assertEqual(result[0], expected_result) + + def test_anchoreAnalyzer_layerCacheMaxGigabytes_value(self): + dot_string_dict = { + "anchoreAnalyzer.layerCacheMaxGigabytes": 1, + } + expected_result = { + 'postgresql': {'auth': {'username': 'anchoreengine'}}, + 'anchoreConfig': { + 'user_authentication': {'hashed_passwords': False}, + 'analyzer': { + 'layer_cache_max_gigabytes': 1 + } + } + + } + result = replace_keys_with_mappings(dot_string_dict, self.results_dir) + self.assertEqual(result[0], expected_result) + + def test_anchoreAnalyzer_enableHints_value(self): + dot_string_dict = { + "anchoreAnalyzer.enableHints": False, + } + expected_result = { + 'postgresql': {'auth': {'username': 'anchoreengine'}}, + 'anchoreConfig': { + 'user_authentication': {'hashed_passwords': False}, + 'analyzer': { + 'enable_hints': False + } + } + + } + result = replace_keys_with_mappings(dot_string_dict, self.results_dir) + self.assertEqual(result[0], expected_result) + + def test_anchoreAnalyzer_configFile_value(self): + dot_string_dict = { + "anchoreAnalyzer.configFile.retrieve_files.file_list": [ + "/etc/passwd" + ], + "anchoreAnalyzer.configFile.secret_search.match_params": [ + 
"MAXFILESIZE=10000", + "STOREONMATCH=n" + ], + "anchoreAnalyzer.configFile.secret_search.regexp_match": [ + "AWS_ACCESS_KEY=(?i).*aws_access_key_id( *=+ *).*(?/", + } + + expected_result = { + 'postgresql': {'auth': {'username': 'anchoreengine'}}, + 'anchoreConfig': { + 'user_authentication': {'hashed_passwords': False}, + 'webhooks': { + 'ssl_verify': False, + 'url': 'http://somehost:9090//', + 'webhook_pass': 'my-webhook-pass', + 'webhook_user': 'my-webhook-user' + } + } + } + + result = replace_keys_with_mappings(dot_string_dict, self.results_dir) + self.assertEqual(result[0], expected_result) + + def test_anchoreGlobal_policyBundles_values(self): + dot_string_dict = { + 'anchoreGlobal.policyBundles.custom_policy_bundle1.json': '{\n "id": "custom1",\n "version": "1_0",\n "name": "My custom bundle",\n "comment": "My system\'s custom bundle",\n "whitelisted_images": [],\n "blacklisted_images": [],\n "mappings": [],\n "whitelists": [],\n "policies": []\n}\n' + } + + expected_result = { + 'postgresql': {'auth': {'username': 'anchoreengine'}}, + 'anchoreConfig': { + 'user_authentication': {'hashed_passwords': False}, + 'policyBundles': { + 'custom_policy_bundle1': { + 'json': '{\n "id": "custom1",\n "version": "1_0",\n "name": "My custom bundle",\n "comment": "My system\'s custom bundle",\n "whitelisted_images": [],\n "blacklisted_images": [],\n "mappings": [],\n "whitelists": [],\n "policies": []\n}\n' + } + } + } + } + + result = replace_keys_with_mappings(dot_string_dict, self.results_dir) + self.assertEqual(result[0], expected_result) + + def test_anchoreGlobal_probes_values(self): + dot_string_dict = { + "anchoreGlobal.probes.liveness.initialDelaySeconds": 120, + "anchoreGlobal.probes.liveness.timeoutSeconds": 10, + "anchoreGlobal.probes.liveness.periodSeconds": 10, + "anchoreGlobal.probes.liveness.failureThreshold": 6, + "anchoreGlobal.probes.liveness.successThreshold": 1, + "anchoreGlobal.probes.readiness.timeoutSeconds": 10, + 
"anchoreGlobal.probes.readiness.periodSeconds": 10, + "anchoreGlobal.probes.readiness.failureThreshold": 3, + "anchoreGlobal.probes.readiness.successThreshold": 1, + } + + expected_result = { 'postgresql': {'auth': {'username': 'anchoreengine'}}, 'anchoreConfig': {'user_authentication': {'hashed_passwords': False}}, + 'probes': { + 'liveness': { + 'failureThreshold': 6, + 'initialDelaySeconds': 120, + 'periodSeconds': 10, + 'successThreshold': 1, + 'timeoutSeconds': 10 + }, + 'readiness': { + 'failureThreshold': 3, + 'periodSeconds': 10, + 'successThreshold': 1, + 'timeoutSeconds': 10 + } + } + } + result = replace_keys_with_mappings(dot_string_dict, self.results_dir) + self.assertEqual(result[0], expected_result) + + # inject_secrets_via_env: false + def test_anchoreGlobal_inject_secrets_via_env_value(self): + dot_string_dict = { + "inject_secrets_via_env": True, + } + expected_result = { 'postgresql': {'auth': {'username': 'anchoreengine'}}, 'anchoreConfig': {'user_authentication': {'hashed_passwords': False}}, + 'injectSecretsViaEnv': True + } + result = replace_keys_with_mappings(dot_string_dict, self.results_dir) + self.assertEqual(result[0], expected_result) + + def test_replace_keys_with_mappings_env_var(self): + + dot_string_dict = {"anchoreApi.maxRequestThreads": 999} + expected_result = { + 'api': + {'extraEnv': [ + {'name': 'ANCHORE_MAX_REQUEST_THREADS', 'value': 999} + ]} + } + result = replace_keys_with_mappings(dot_string_dict, self.results_dir) + self.assertEqual(result[1], expected_result) + + def test_replace_keys_with_mappings(self): + + dot_string_dict = {"anchore-feeds-db.persistence.size": 100} + expected_result = { 'postgresql': {'auth': {'username': 'anchoreengine'}}, 'anchoreConfig': {'user_authentication': {'hashed_passwords': False}}, + "feeds": { + "feeds-db": { + "primary": { + "persistence": { + "size": 100 + } + } + } + } + } + result = replace_keys_with_mappings(dot_string_dict, self.results_dir) + self.assertEqual(result[0], 
expected_result) + + # now an environment variable + def test_anchoreGlobal_serverRequestTimeout_value(self): + dot_string_dict = { + "anchoreGlobal.serverRequestTimeout": 300, + } + expected_result = { 'postgresql': {'auth': {'username': 'anchoreengine'}}, 'anchoreConfig': {'user_authentication': {'hashed_passwords': False}},} + result = replace_keys_with_mappings(dot_string_dict, self.results_dir) + self.assertEqual(result[0], expected_result) + + expected_env_result = { + 'extraEnv': + [ + { + 'name': 'ANCHORE_GLOBAL_SERVER_REQUEST_TIMEOUT_SEC', + 'value': 300 + } + ] + } + self.assertEqual(result[1], expected_env_result) + + def test_anchoreGlobal_maxCompressedImageSizeMB_value(self): + dot_string_dict = { + "anchoreGlobal.maxCompressedImageSizeMB": 700 + } + expected_result = { 'postgresql': {'auth': {'username': 'anchoreengine'}}, 'anchoreConfig': {'user_authentication': {'hashed_passwords': False}}, + } + + expected_env_result = { + 'extraEnv': + [ + { + 'name': 'ANCHORE_MAX_COMPRESSED_IMAGE_SIZE_MB', + 'value': 700 + } + ] + } + result = replace_keys_with_mappings(dot_string_dict, self.results_dir) + self.assertEqual(result[1], expected_env_result) + + +if __name__ == '__main__': + unittest.main() diff --git a/scripts/enterprise-value-converter/tests/test_anchorePolicyEngine_value_mapping.py b/scripts/enterprise-value-converter/tests/test_anchorePolicyEngine_value_mapping.py new file mode 100644 index 00000000..00e1a821 --- /dev/null +++ b/scripts/enterprise-value-converter/tests/test_anchorePolicyEngine_value_mapping.py @@ -0,0 +1,301 @@ +import os +import shutil +import unittest +from helpers import ( + replace_keys_with_mappings, +) + +class TestReplaceKeysWithMappingsPolicyEngine(unittest.TestCase): + def setUp(self): + self.results_dir = "test_results_dir" + + def tearDown(self): + if os.path.exists(self.results_dir): + shutil.rmtree(self.results_dir) + + def test_anchorePolicyEngine_replicaCount_value(self): + dot_string_dict = { + 
"anchorePolicyEngine.replicaCount": 2, + } + expected_result = { 'postgresql': {'auth': {'username': 'anchoreengine'}}, 'anchoreConfig': {'user_authentication': {'hashed_passwords': False}}, + 'policyEngine': { + 'replicaCount': 2 + } + } + result = replace_keys_with_mappings(dot_string_dict, self.results_dir) + self.assertEqual(result[0], expected_result) + + + def test_anchorePolicyEngine_resources_value(self): + dot_string_dict = { + "anchorePolicyEngine.resources.limits.cpu": 1, + "anchorePolicyEngine.resources.limits.memory": "4G", + "anchorePolicyEngine.resources.requests.cpu": 1, + "anchorePolicyEngine.resources.requests.memory": "1G" + } + expected_result = { 'postgresql': {'auth': {'username': 'anchoreengine'}}, 'anchoreConfig': {'user_authentication': {'hashed_passwords': False}}, + 'policyEngine': { + 'resources': { + 'limits': { + 'cpu': 1, + 'memory': '4G' + }, + 'requests': { + 'cpu': 1, + 'memory': '1G' + } + } + } + } + result = replace_keys_with_mappings(dot_string_dict, self.results_dir) + self.assertEqual(result[0], expected_result) + + def test_anchorePolicyEngine_labels_value(self): + dot_string_dict = { + "anchorePolicyEngine.labels.foobar": "baz", + "anchorePolicyEngine.labels.with.a.dot.foobar": "baz" + } + expected_result = { 'postgresql': {'auth': {'username': 'anchoreengine'}}, 'anchoreConfig': {'user_authentication': {'hashed_passwords': False}}, + 'policyEngine': { + 'labels': + { + 'foobar': 'baz', + 'with.a.dot.foobar': 'baz' + } + } + } + result = replace_keys_with_mappings(dot_string_dict, self.results_dir) + self.assertEqual(result[0], expected_result) + + def test_anchorePolicyEngine_annotations_value(self): + dot_string_dict = { + "anchorePolicyEngine.annotations.foobar": "baz", + "anchorePolicyEngine.annotations.with.a.dot.foobar": "baz" + } + expected_result = { 'postgresql': {'auth': {'username': 'anchoreengine'}}, 'anchoreConfig': {'user_authentication': {'hashed_passwords': False}}, + 'policyEngine': { + 'annotations': + { + 
'foobar': 'baz', + 'with.a.dot.foobar': 'baz' + } + } + } + result = replace_keys_with_mappings(dot_string_dict, self.results_dir) + self.assertEqual(result[0], expected_result) + + def test_anchorePolicyEngine_deploymentAnnotations_value(self): + dot_string_dict = { + "anchorePolicyEngine.deploymentAnnotations.foobar": "baz", + "anchorePolicyEngine.deploymentAnnotations.with.a.dot.foobar": "baz" + } + expected_result = { 'postgresql': {'auth': {'username': 'anchoreengine'}}, 'anchoreConfig': {'user_authentication': {'hashed_passwords': False}}, + 'policyEngine': { + 'deploymentAnnotations': { + 'foobar': 'baz', + 'with.a.dot.foobar': 'baz' + } + } + } + result = replace_keys_with_mappings(dot_string_dict, self.results_dir) + self.assertEqual(result[0], expected_result) + + def test_anchorePolicyEngine_nodeSelector_value(self): + dot_string_dict = { + "anchorePolicyEngine.nodeSelector.name": "foo", + "anchorePolicyEngine.nodeSelector.with.a.dot.name": "bar" + } + expected_result = { 'postgresql': {'auth': {'username': 'anchoreengine'}}, 'anchoreConfig': {'user_authentication': {'hashed_passwords': False}}, + 'policyEngine': { + 'nodeSelector': { + 'name': 'foo', + 'with.a.dot.name': 'bar' + } + } + } + result = replace_keys_with_mappings(dot_string_dict, self.results_dir) + self.assertEqual(result[0], expected_result) + + def test_anchorePolicyEngine_tolerations_value(self): + dot_string_dict = { + "anchorePolicyEngine.tolerations": [ + { + "key": "key", + "operator": "Equal", + "value": "value", + "effect": "NoSchedule" + } + ] + } + expected_result = { 'postgresql': {'auth': {'username': 'anchoreengine'}}, 'anchoreConfig': {'user_authentication': {'hashed_passwords': False}}, + 'policyEngine': { + 'tolerations': [ + { + 'key': 'key', + 'operator': 'Equal', + 'value': 'value', + 'effect': 'NoSchedule' + } + ] + } + } + result = replace_keys_with_mappings(dot_string_dict, self.results_dir) + self.assertEqual(result[0], expected_result) + + def 
test_anchorePolicyEngine_affinity_value(self): + dot_string_dict = { + "anchorePolicyEngine.affinity.name": "foo" + } + expected_result = { 'postgresql': {'auth': {'username': 'anchoreengine'}}, 'anchoreConfig': {'user_authentication': {'hashed_passwords': False}}, + 'policyEngine': { + 'affinity': + { + 'name': 'foo' + } + } + } + result = replace_keys_with_mappings(dot_string_dict, self.results_dir) + self.assertEqual(result[0], expected_result) + + def test_anchorePolicyEngine_extraEnv_value(self): + dot_string_dict = { + "anchorePolicyEngine.extraEnv": [ + { + "name": "foo", + "value": "bar" + } + ] + } + expected_result = { 'postgresql': {'auth': {'username': 'anchoreengine'}}, 'anchoreConfig': {'user_authentication': {'hashed_passwords': False}}, + 'policyEngine': { + 'extraEnv': [ + { + "name": "foo", + "value": "bar" + } + ] + } + } + result = replace_keys_with_mappings(dot_string_dict, self.results_dir) + self.assertEqual(result[0], expected_result) + + def test_anchorePolicyEngine_serviceAccountName_value(self): + dot_string_dict = { + "anchorePolicyEngine.serviceAccountName": "Null" + } + expected_result = { 'postgresql': {'auth': {'username': 'anchoreengine'}}, 'anchoreConfig': {'user_authentication': {'hashed_passwords': False}}, + 'policyEngine': { + 'serviceAccountName': "Null" + } + } + result = replace_keys_with_mappings(dot_string_dict, self.results_dir) + self.assertEqual(result[0], expected_result) + + + def test_anchorePolicyEngine_service_value(self): + dot_string_dict = { + "anchorePolicyEngine.service.name": "Null", + "anchorePolicyEngine.service.type": "ClusterIP", + "anchorePolicyEngine.service.port": 8087, + "anchorePolicyEngine.service.annotations.foo": "bar", + "anchorePolicyEngine.service.annotations.with.a.dot": "qux", + "anchorePolicyEngine.service.labels.foobar": "baz", + "anchorePolicyEngine.service.labels.with.a.dot": "qux", + } + + expected_result = { 'postgresql': {'auth': {'username': 'anchoreengine'}}, 'anchoreConfig': 
{'user_authentication': {'hashed_passwords': False}}, + 'policyEngine': { + 'service': { + "name": "Null", + "type": "ClusterIP", + "port": 8087, + "annotations": { + "foo": "bar", + "with.a.dot": "qux" + }, + "labels": { + "foobar": "baz", + "with.a.dot": "qux" + } + } + } + } + result = replace_keys_with_mappings(dot_string_dict, self.results_dir) + self.assertEqual(result[0], expected_result) + + def test_anchorePolicyEngine_cycleTimers_value(self): + dot_string_dict = { + "anchorePolicyEngine.cycleTimers.feed_sync": 14400, + "anchorePolicyEngine.cycleTimers.feed_sync_checker": 3600, + "anchorePolicyEngine.cycleTimers.grypedb_sync": 60, + } + + expected_result = { + 'postgresql': {'auth': {'username': 'anchoreengine'}}, + 'anchoreConfig': { + 'user_authentication': {'hashed_passwords': False}, + 'policy_engine': { + 'cycle_timers': { + "feed_sync": 14400, + "feed_sync_checker": 3600, + "grypedb_sync": 60, + } + } + } + } + result = replace_keys_with_mappings(dot_string_dict, self.results_dir) + self.assertEqual(result[0], expected_result) + + def test_anchorePolicyEngine_overrideFeedsToUpstream_value(self): + dot_string_dict = { + "anchorePolicyEngine.overrideFeedsToUpstream": True + } + + expected_result = { + 'postgresql': {'auth': {'username': 'anchoreengine'}}, + 'anchoreConfig': { + 'user_authentication': {'hashed_passwords': False}, + 'policy_engine': { + 'overrideFeedsToUpstream': True + } + } + } + result = replace_keys_with_mappings(dot_string_dict, self.results_dir) + self.assertEqual(result[0], expected_result) + + # Values that become environment variables for Anchore Policy Engine + def test_anchorePolicyEngine_cacheTTL_value(self): + dot_string_dict = { + "anchorePolicyEngine.cacheTTL": 3600, + } + + expected_result = { + 'policyEngine': { + 'extraEnv': [ + { + 'name': 'ANCHORE_POLICY_EVAL_CACHE_TTL_SECONDS', + 'value': 3600 + } + ] + } + } + result = replace_keys_with_mappings(dot_string_dict, self.results_dir) + self.assertEqual(result[1], 
expected_result) + + def test_anchorePolicyEngine_enablePackageDbLoad_value(self): + dot_string_dict = { + "anchorePolicyEngine.enablePackageDbLoad": True, + } + + expected_result = { + 'policyEngine': { + 'extraEnv': [ + { + 'name': 'ANCHORE_POLICY_ENGINE_ENABLE_PACKAGE_DB_LOAD', + 'value': True + } + ] + } + } + result = replace_keys_with_mappings(dot_string_dict, self.results_dir) + self.assertEqual(result[1], expected_result) diff --git a/scripts/enterprise-value-converter/tests/test_anchoreSimpleQueue_value_mapping.py b/scripts/enterprise-value-converter/tests/test_anchoreSimpleQueue_value_mapping.py new file mode 100644 index 00000000..69551464 --- /dev/null +++ b/scripts/enterprise-value-converter/tests/test_anchoreSimpleQueue_value_mapping.py @@ -0,0 +1,234 @@ +import os +import shutil +import unittest +from helpers import ( + replace_keys_with_mappings, +) + +class TestReplaceKeysWithMappingsSimpleQueue(unittest.TestCase): + def setUp(self): + self.results_dir = "test_results_dir" + + def tearDown(self): + if os.path.exists(self.results_dir): + shutil.rmtree(self.results_dir) + + def test_anchoreSimpleQueue_replicaCount_value(self): + dot_string_dict = { + "anchoreSimpleQueue.replicaCount": 2, + } + expected_result = { 'postgresql': {'auth': {'username': 'anchoreengine'}}, 'anchoreConfig': {'user_authentication': {'hashed_passwords': False}}, + 'simpleQueue': { + 'replicaCount': 2 + } + } + result = replace_keys_with_mappings(dot_string_dict, self.results_dir) + self.assertEqual(result[0], expected_result) + + + def test_anchoreSimpleQueue_resources_value(self): + dot_string_dict = { + "anchoreSimpleQueue.resources.limits.cpu": 1, + "anchoreSimpleQueue.resources.limits.memory": "4G", + "anchoreSimpleQueue.resources.requests.cpu": 1, + "anchoreSimpleQueue.resources.requests.memory": "1G" + } + expected_result = { 'postgresql': {'auth': {'username': 'anchoreengine'}}, 'anchoreConfig': {'user_authentication': {'hashed_passwords': False}}, + 'simpleQueue': { + 
'resources': { + 'limits': { + 'cpu': 1, + 'memory': '4G' + }, + 'requests': { + 'cpu': 1, + 'memory': '1G' + } + } + } + } + result = replace_keys_with_mappings(dot_string_dict, self.results_dir) + self.assertEqual(result[0], expected_result) + + + def test_anchoreSimpleQueue_labels_value(self): + dot_string_dict = { + "anchoreSimpleQueue.labels.myLabel": "myValue", + "anchoreSimpleQueue.labels.myOtherLabel": "myOtherValue", + "anchoreSimpleQueue.labels.anotherLabel.with.a.dot": "qux" + } + expected_result = { 'postgresql': {'auth': {'username': 'anchoreengine'}}, 'anchoreConfig': {'user_authentication': {'hashed_passwords': False}}, + 'simpleQueue': { + 'labels': + { + 'myLabel': 'myValue', + 'myOtherLabel': 'myOtherValue', + 'anotherLabel.with.a.dot': 'qux' + } + } + } + result = replace_keys_with_mappings(dot_string_dict, self.results_dir) + self.assertEqual(result[0], expected_result) + + def test_anchoreSimpleQueue_annotations_value(self): + dot_string_dict = { + "anchoreSimpleQueue.annotations.foo": "bar", + "anchoreSimpleQueue.annotations.bar": "baz", + "anchoreSimpleQueue.annotations.anotherLabel.with.a.dot": "qux" + } + expected_result = { 'postgresql': {'auth': {'username': 'anchoreengine'}}, 'anchoreConfig': {'user_authentication': {'hashed_passwords': False}}, + 'simpleQueue': { + 'annotations': + { + 'foo': 'bar', + 'bar': 'baz', + 'anotherLabel.with.a.dot': 'qux' + } + + } + } + result = replace_keys_with_mappings(dot_string_dict, self.results_dir) + self.assertEqual(result[0], expected_result) + + def test_anchoreSimpleQueue_deploymentAnnotations_value(self): + dot_string_dict = { + "anchoreSimpleQueue.deploymentAnnotations.foo": "bar", + "anchoreSimpleQueue.deploymentAnnotations.bar": "baz", + "anchoreSimpleQueue.deploymentAnnotations.anotherLabel.with.a.dot": "qux" + } + expected_result = { 'postgresql': {'auth': {'username': 'anchoreengine'}}, 'anchoreConfig': {'user_authentication': {'hashed_passwords': False}}, + 'simpleQueue': { + 
'deploymentAnnotations': + { + 'foo': 'bar', + 'bar': 'baz', + 'anotherLabel.with.a.dot': 'qux' + } + } + } + result = replace_keys_with_mappings(dot_string_dict, self.results_dir) + self.assertEqual(result[0], expected_result) + + def test_anchoreSimpleQueue_nodeSelector_value(self): + dot_string_dict = { + "anchoreSimpleQueue.nodeSelector.name": "foo", + "anchoreSimpleQueue.nodeSelector.value": "bar", + "anchoreSimpleQueue.nodeSelector.anotherLabel.with.a.dot": "baz" + } + expected_result = { 'postgresql': {'auth': {'username': 'anchoreengine'}}, 'anchoreConfig': {'user_authentication': {'hashed_passwords': False}}, + 'simpleQueue': { + 'nodeSelector': + { + 'name': 'foo', + 'value': 'bar', + 'anotherLabel.with.a.dot': 'baz' + } + } + } + result = replace_keys_with_mappings(dot_string_dict, self.results_dir) + self.assertEqual(result[0], expected_result) + + def test_anchoreSimpleQueue_tolerations_value(self): + dot_string_dict = { + "anchoreSimpleQueue.tolerations": [ + { + "name": "foo", + "value": "bar" + } + ] + } + expected_result = { 'postgresql': {'auth': {'username': 'anchoreengine'}}, 'anchoreConfig': {'user_authentication': {'hashed_passwords': False}}, + 'simpleQueue': { + 'tolerations': [ + { + 'name': 'foo', + 'value': 'bar' + } + ] + } + } + result = replace_keys_with_mappings(dot_string_dict, self.results_dir) + self.assertEqual(result[0], expected_result) + + def test_anchoreSimpleQueue_affinity_value(self): + dot_string_dict = { + "anchoreSimpleQueue.affinity.name": "foo", + "anchoreSimpleQueue.affinity.value": "bar", + } + expected_result = { 'postgresql': {'auth': {'username': 'anchoreengine'}}, 'anchoreConfig': {'user_authentication': {'hashed_passwords': False}}, + 'simpleQueue': { + 'affinity':{ + 'name': 'foo', + 'value': 'bar', + } + } + } + result = replace_keys_with_mappings(dot_string_dict, self.results_dir) + self.assertEqual(result[0], expected_result) + + def test_anchoreSimpleQueue_extraEnv_value(self): + dot_string_dict = { + 
"anchoreSimpleQueue.extraEnv": [ + { + "name": "foo", + "value": "bar" + } + ] + } + expected_result = { 'postgresql': {'auth': {'username': 'anchoreengine'}}, 'anchoreConfig': {'user_authentication': {'hashed_passwords': False}}, + 'simpleQueue': { + 'extraEnv': [ + { + "name": "foo", + "value": "bar" + } + ] + } + } + result = replace_keys_with_mappings(dot_string_dict, self.results_dir) + self.assertEqual(result[0], expected_result) + + def test_anchoreSimpleQueue_serviceAccountName_value(self): + dot_string_dict = { + "anchoreSimpleQueue.serviceAccountName": "Null" + } + expected_result = { 'postgresql': {'auth': {'username': 'anchoreengine'}}, 'anchoreConfig': {'user_authentication': {'hashed_passwords': False}}, + 'simpleQueue': { + 'serviceAccountName': "Null" + } + } + result = replace_keys_with_mappings(dot_string_dict, self.results_dir) + self.assertEqual(result[0], expected_result) + + + def test_anchoreSimpleQueue_service_value(self): + dot_string_dict = { + "anchoreSimpleQueue.service.name": "Null", + "anchoreSimpleQueue.service.type": "ClusterIP", + "anchoreSimpleQueue.service.port": 8082, + "anchoreSimpleQueue.service.annotations.foo": "bar", + "anchoreSimpleQueue.service.annotations.baz": "qux", + "anchoreSimpleQueue.service.annotations.with.a.dot": "quux", + "anchoreSimpleQueue.service.labels.foobar": "baz", + "anchoreSimpleQueue.service.labels.with.a.dot": "qux" + } + expected_result = { 'postgresql': {'auth': {'username': 'anchoreengine'}}, 'anchoreConfig': {'user_authentication': {'hashed_passwords': False}}, + 'simpleQueue': { + 'service': { + "name": "Null", + "type": "ClusterIP", + "port": 8082, + "annotations": { + "foo": "bar", + "baz": "qux", + "with.a.dot": "quux" + }, + "labels": { + "foobar": "baz", + "with.a.dot": "qux" + } + } + } + } + result = replace_keys_with_mappings(dot_string_dict, self.results_dir) + self.assertEqual(result[0], expected_result) diff --git a/scripts/enterprise-value-converter/tests/test_helpers.py 
b/scripts/enterprise-value-converter/tests/test_helpers.py new file mode 100644 index 00000000..b99d5833 --- /dev/null +++ b/scripts/enterprise-value-converter/tests/test_helpers.py @@ -0,0 +1,321 @@ +# test_helpers.py +import os +import shutil +import unittest +import yaml +from helpers import ( + create_new_dotstring, + write_to_file, + prep_dir, + dict_keys_to_dot_string, + merge_dicts, + replace_keys_with_mappings, + create_dict_entry, + convert_values_file +) + +# write_to_file(data, file_name): writes data to file_name, returns file_name +class TestWriteToFile(unittest.TestCase): + def setUp(self): + self.test_filename = 'test_file.txt' + + def tearDown(self): + if os.path.exists(self.test_filename): + os.remove(self.test_filename) + + def test_write_to_file(self): + data = 'Hello, world!' + file_name = write_to_file(data, self.test_filename) + + self.assertTrue(os.path.exists(self.test_filename)) + self.assertEqual(file_name, self.test_filename) + + with open(self.test_filename, 'r') as file: + written_data = file.read() + + self.assertEqual(written_data, data) + +# prep_dir(directory_name, clean=False): creates directory_name if it doesn't exist, returns directory_name +class TestPrepDir(unittest.TestCase): + def empty_dir(self, directory_path): + # if listdir returns an empty list, the directory is empty, return true + return not os.listdir(directory_path) + + def setUp(self): + self.prep_dir_name = 'prep_dir_name' + if os.path.exists(self.prep_dir_name): + shutil.rmtree(self.prep_dir_name) + + def tearDown(self): + if os.path.exists(self.prep_dir_name): + shutil.rmtree(self.prep_dir_name) + + def test_prep_dir_with_clean(self): + # create the self.prep_dir_name directory with some stuff in it to confirm its cleared out + os.makedirs(self.prep_dir_name) + file_path = os.path.join(self.prep_dir_name, "test_file.txt") + + # Create and close an empty file + with open(file_path, 'w'): + pass + + self.assertFalse(self.empty_dir(self.prep_dir_name)) + + # 
clean=True deletes the whole directory, then recreates it + prep_dir_path = prep_dir(self.prep_dir_name, clean=True) + self.assertTrue(os.path.exists(self.prep_dir_name)) + self.assertTrue(self.empty_dir(self.prep_dir_name)) + self.assertEqual(prep_dir_path, self.prep_dir_name) + + def test_prep_dir_without_clean(self): + # create the self.prep_dir_name directory with some stuff in it to confirm its not cleared out + os.makedirs(self.prep_dir_name) + file_path = os.path.join(self.prep_dir_name, "test_file.txt") + + # Create and close an empty file + with open(file_path, 'w'): + pass + + self.assertFalse(self.empty_dir(self.prep_dir_name)) + + # clean=False just creates the directory if it doesn't exist + prep_dir_path = prep_dir(self.prep_dir_name, clean=False) + self.assertTrue(os.path.exists(self.prep_dir_name)) + self.assertEqual(prep_dir_path, self.prep_dir_name) + self.assertFalse(self.empty_dir(self.prep_dir_name)) + +# dict_keys_to_dot_string(dictionary, prefix=''): recursively converts dictionary keys to dot string representation +# # return a dictionary where the keys are dot string representation of the old keys and +# the value is the original values +class TestDictKeysToDotString(unittest.TestCase): + def test_dict_keys_to_dotstring(self): + my_dict = { + "key1": "value1", + "key2": "value2", + "key3": { + "key31": "value31", + "key32": "value32", + "key33": { + "key331": "value331", + "key332": "value332", + "key333": ["value3331", "value3332"] + } + }, + "key4": ["value41", "value42"], + "key5": 5, + "key6": False + } + + result = dict_keys_to_dot_string(my_dict) + + self.assertIn("key1", result) + self.assertEqual(result["key1"], "value1") + self.assertTrue(isinstance(result["key1"], str)) + + self.assertIn("key2", result) + self.assertEqual(result["key2"], "value2") + self.assertTrue(isinstance(result["key2"], str)) + + self.assertIn("key3.key31", result) + self.assertEqual(result["key3.key31"], "value31") + 
self.assertTrue(isinstance(result["key3.key31"], str)) + + self.assertIn("key3.key32", result) + self.assertEqual(result["key3.key32"], "value32") + self.assertTrue(isinstance(result["key3.key32"], str)) + + self.assertIn("key3.key33.key331", result) + self.assertEqual(result["key3.key33.key331"], "value331") + self.assertTrue(isinstance(result["key3.key33.key331"], str)) + + self.assertIn("key3.key33.key332", result) + self.assertEqual(result["key3.key33.key332"], "value332") + self.assertTrue(isinstance(result["key3.key33.key332"], str)) + + self.assertIn("key3.key33.key333", result) + self.assertEqual(result["key3.key33.key333"], ["value3331", "value3332"]) + self.assertTrue(isinstance(result["key3.key33.key333"], list)) + + self.assertIn("key4", result) + self.assertEqual(result["key4"], ["value41", "value42"]) + self.assertTrue(isinstance(result["key4"], list)) + + self.assertIn("key5", result) + self.assertEqual(result["key5"], 5) + self.assertTrue(isinstance(result["key5"], int)) + + self.assertIn("key6", result) + self.assertEqual(result["key6"], False) + self.assertTrue(isinstance(result["key6"], bool)) + + self.assertTrue(isinstance(result, dict)) + +# merge_dicts(dict1, dict2): merges dictionaries, returns merged dictionary +class TestMergeDicts(unittest.TestCase): + def test_merge_dicts(self): + dicts1 = { + "key1": "value1", + "nested_keys": { + "uncommon": "uncommon_value", + "common": "dict1_common_value" + }, + "common_key": "dict1_common_value" + } + + dict2 = { + "key2": "value2", + "nested_keys": { + "common": "dict2_common_value" + }, + "common_key": "dict2_common_value" + } + + expected_dict = { + "key1": "value1", + "key2": "value2", + "nested_keys": { + "uncommon": "uncommon_value", + "common": "dict2_common_value" + }, + "common_key": "dict2_common_value" + } + + merge_dicts_result = merge_dicts(dicts1, dict2) + + self.assertEqual(merge_dicts_result, expected_dict) + +# create_new_dotstring(keys: list, dotstring: str, level: int) -> str +# 
takes the original key as a list, a dotstring representation of the new key, and the level that the replacement should occur +# strips off the level number from the original key, and appends the dotstring representation of the new key as a list to the end of the original key +# returns a string +class TestCreateNewDotString(unittest.TestCase): + def test_create_new_dotstring_level_1(self): + keys = ["key1", "key2", "key3"] + dotstring = "key4" + level = 1 + + expected_result = "key4.key2.key3" + + result = create_new_dotstring(keys, dotstring, level) + + self.assertEqual(result, expected_result) + + def test_create_new_dotstring_level_2(self): + keys = ["key1", "key2", "key3"] + dotstring = "key4" + level = 2 + + expected_result = "key4.key3" + + result = create_new_dotstring(keys, dotstring, level) + + self.assertEqual(result, expected_result) + + def test_create_new_dotstring_level_3(self): + keys = ["key1", "key2", "key3"] + dotstring = "key4" + level = 3 + + expected_result = "key4" + + result = create_new_dotstring(keys, dotstring, level) + + self.assertEqual(result, expected_result) + +# create_dict_entry(dotstring, value) +# takes a dotstring and a value, returns a dictionary where the keys are created from the dot string representation +class TestCreateDictEntry(unittest.TestCase): + def test_create_dict_entry(self): + dotstring = "key1.key2.key3" + value = "value" + + expected_result = { + "key1": { + "key2": { + "key3": "value" + } + } + } + + result = create_dict_entry(dotstring, value) + + self.assertEqual(result, expected_result) + +# convert_values_file(file, results_dir) +class TestConvertValuesFile(unittest.TestCase): + def setUp(self): + original_test_config_file = 'tests/configs/test_convert_values_file.yaml' + self.expected_result_file = 'tests/configs/test_convert_values_file_result.yaml' + self.temp_test_config_file = 'test_values.yaml' + self.test_results_dir = 'test_results_dir' + shutil.copy(original_test_config_file, 
self.temp_test_config_file) + + def tearDown(self): + if os.path.exists(self.temp_test_config_file): + os.remove(self.temp_test_config_file) + if os.path.exists(self.test_results_dir): + shutil.rmtree(self.test_results_dir) + + def test_convert_values_file(self): + convert_values_file(self.temp_test_config_file, self.test_results_dir) + self.assertTrue(os.path.exists(self.test_results_dir)) + self.assertTrue(os.path.exists(os.path.join(self.test_results_dir, 'enterprise.test_values.yaml'))) + self.assertTrue(os.path.exists(os.path.join(self.test_results_dir, 'dotstring.txt'))) + converted = dict() + with open(os.path.join(self.test_results_dir, 'enterprise.test_values.yaml'), 'r') as content: + converted = yaml.safe_load(content) + + with open(self.expected_result_file, 'r') as expected_content: + expected_result = yaml.safe_load(expected_content) + + self.assertEqual(converted, expected_result) + +# replace_keys_with_mappings(dot_string_dict, results_dir): +# returns a dictionary where the keys are created from the dot string representation +class TestReplaceKeysWithMappings(unittest.TestCase): + def setUp(self): + self.results_dir = "test_results_dir" + + def tearDown(self): + if os.path.exists(self.results_dir): + shutil.rmtree(self.results_dir) + + def test_replace_keys_with_mappings(self): + + dot_string_dict = {"anchore-feeds-db.persistence.size": 100} + expected_result = { + 'postgresql': {'auth': {'username': 'anchoreengine'}}, + 'anchoreConfig': {'user_authentication': {'hashed_passwords': False}}, + "feeds": { + "feeds-db": { + "primary": { + "persistence": { + "size": 100 + } + } + } + } + } + result = replace_keys_with_mappings(dot_string_dict, self.results_dir) + self.assertEqual(result[0], expected_result) + + def test_replace_keys_with_mappings_env_var(self): + + dot_string_dict = {"anchoreApi.maxRequestThreads": 999} + expected_result = { + 'api': + {'extraEnv': [ + {'name': 'ANCHORE_MAX_REQUEST_THREADS', 'value': 999} + ]} + } + result = 
replace_keys_with_mappings(dot_string_dict, self.results_dir) + self.assertEqual(result[1], expected_result) + + anchore_config_expected_results = { + 'anchoreConfig': {'user_authentication': {'hashed_passwords': False}}, + 'postgresql': {'auth': {'username': 'anchoreengine'}} + } + self.assertEqual(result[0], anchore_config_expected_results) + +if __name__ == '__main__': + unittest.main() diff --git a/scripts/hooks/helm-unittest.sh b/scripts/hooks/helm-unittest.sh new file mode 100755 index 00000000..b01e4a45 --- /dev/null +++ b/scripts/hooks/helm-unittest.sh @@ -0,0 +1,39 @@ +#!/usr/bin/env bash + +readonly DEBUG=${DEBUG:-unset} +if [ "${DEBUG}" != unset ]; then + set -x +fi + +if [[ ! $(which helm) ]]; then + echo "helm not found. Please install helm and try again" + exit 1 +fi + +if ! helm plugin list | grep -q unittest; then + echo "helm-unittest plugin not found. Press 'y' to install with helm or any other key to skip" + read -r install_helm_unittest + if [[ "$install_helm_unittest" != "y" ]]; then + exit 1 + fi + helm plugin install https://github.com/helm-unittest/helm-unittest.git +fi + +files_changed="$(git diff --name-only origin/main | sort | uniq)" +# Adding || true to avoid "Process exited with code 1" errors +charts_dirs_changed="$(echo "$files_changed" | xargs dirname | grep -o "stable/[^/]*" | sort | uniq || true)" + +charts_to_test=("stable/enterprise" "stable/feeds") + +for chart in ${charts_dirs_changed}; do + for charts_to_test in "${charts_to_test[@]}"; do + if [[ "$chart" == "$charts_to_test" ]]; then + echo "Running unit tests for ${chart}" + pushd "${chart}" || exit + helm repo add anchore https://charts.anchore.io/stable + helm dep up + helm unittest . 
+ popd || exit + fi + done +done diff --git a/scripts/hooks/readme-generator.sh b/scripts/hooks/readme-generator.sh new file mode 100755 index 00000000..ce2f11b2 --- /dev/null +++ b/scripts/hooks/readme-generator.sh @@ -0,0 +1,34 @@ +#!/usr/bin/env bash + +readonly DEBUG=${DEBUG:-unset} +if [ "${DEBUG}" != unset ]; then + set -x +fi + +if [[ ! $(which readme-generator) ]]; then + echo "readme-generator not found. Press 'y' to install with npm or any other key to skip" + read -r install_readme_generator + if [[ "$install_readme_generator" != "y" ]]; then + exit 1 + fi + if [[ ! $(which npm) ]]; then + echo "npm not found. Please install npm and try again" + exit 1 + fi + npm install -g @bitnami/readme-generator-for-helm +fi + +files_changed="$(git diff --name-only origin/main | sort | uniq)" +# Adding || true to avoid "Process exited with code 1" errors +charts_dirs_changed="$(echo "$files_changed" | xargs dirname | grep -o "stable/[^/]*" | sort | uniq || true)" + +chart_with_metadata=("stable/enterprise" "stable/feeds" "stable/ecs-inventory") + +for chart in ${charts_dirs_changed}; do + for chart_with_metadata in "${chart_with_metadata[@]}"; do + if [[ "$chart" == "$chart_with_metadata" ]]; then + echo "Updating README.md for ${chart}" + readme-generator --values "${chart}/values.yaml" --readme "${chart}/README.md" + fi + done +done diff --git a/stable/anchore-admission-controller/Chart.yaml b/stable/anchore-admission-controller/Chart.yaml index e34e37a7..9b820450 100644 --- a/stable/anchore-admission-controller/Chart.yaml +++ b/stable/anchore-admission-controller/Chart.yaml @@ -1,12 +1,13 @@ +apiVersion: v1 name: anchore-admission-controller +version: 0.5.0 +appVersion: 0.5.0 description: A kubernetes admission controller for validating and mutating webhooks that operates against Anchore Engine to make access decisions and annotations -apiVersion: v1 -appVersion: 0.3.0 -version: 0.3.0 home: https://github.com/anchore/kubernetes-admission-controller maintainers: - 
name: zhill email: zach@anchore.com - name: btodhunter email: bradyt@anchore.com -icon: https://anchore.com/wp-content/uploads/2016/08/anchore.png +icon: https://anchoreprd.wpengine.com/wp-content/uploads/2021/12/favicon.png +kubeVersion: ^1.19.0-0 diff --git a/stable/anchore-admission-controller/README.md b/stable/anchore-admission-controller/README.md index 9389eea9..9cd5d86e 100644 --- a/stable/anchore-admission-controller/README.md +++ b/stable/anchore-admission-controller/README.md @@ -16,7 +16,7 @@ to deploy one with: helm install --name anchore stable/anchore-engine ``` -Setup of policies and users is covered in the anchore documentation, for this readme we'll use admin user credentials, but it +Setup of policies and users is covered in the anchore documentation, for this readme we'll use admin user credentials, but it is *strongly* suggested that you use a non-admin user for the controller credential. 1. Create a secret for the anchore credentials that the controller will use to make api calls to Anchore. This must be done out-of-band of the chart creation and should be in the @@ -27,7 +27,7 @@ same namespace you will deploy the chart to. The file must be a json file with t "users": [ { "username": "user1", "password": "password"}, { "uesrname": "user2", "password": "password2"}, - ... + ... ] } ``` @@ -71,15 +71,17 @@ It will remove kubernetes objects which are not removed by a helm delete. Pass t | Key | Expected Type | Default Value | Description | |---|---|---|---| -|replicaCount | int | 1 | replicas, should generally only need one +|replicaCount | int | 1 | replicas, should generally only need one |---|---|---|---| |logVerbosity | int | 6 | log verbosity of controller, 1 = error, 2 warn, 3 debug.... 
|---|---|---|---| -|image | str | release tag | Tag including registry and repository for image to use +|image | str | release tag | Tag including registry and repository for image to use |---|---|---|---| |imagePullPolicy | str | IfNotPresent | Standard k8s pull policy setting |---|---|---|---| -|service.name | str | anchoreadmissioncontroller | Name for the svc instance +|imagePullSecrets | array | [] | Image pull secrets +|---|---|---|---| +|service.name | str | anchoreadmissioncontroller | Name for the svc instance |---|---|---|---| |service.type | str | ClusterIp | Type to use for k8s service definition |---|---|---|---| @@ -99,6 +101,8 @@ It will remove kubernetes objects which are not removed by a helm delete. Pass t |---|---|---|---| |requestAnalysis | boolean | true | Ask anchore to analyze an image that isn't already analyzed |---|---|---|---| +|initCa.image | str | cfssl/cfssl:latest | Tag including registry and repository for the initCa image +|---|---|---|---| |initCa.extraEnv | array | [] | Define custom environment variables to pass to init-ca pod | |---|---|---|---| diff --git a/stable/anchore-admission-controller/ci/fake-values.yaml b/stable/anchore-admission-controller/ci/fake-values.yaml index 3d1c6db8..2983ae32 100644 --- a/stable/anchore-admission-controller/ci/fake-values.yaml +++ b/stable/anchore-admission-controller/ci/fake-values.yaml @@ -4,3 +4,4 @@ credentials: password: password1 - username: user2 password: password2 +anchoreEndpoint: http://engine-anchore-engine-api:8228 diff --git a/stable/anchore-admission-controller/templates/_helpers.tpl b/stable/anchore-admission-controller/templates/_helpers.tpl index 09924947..c0bf7377 100644 --- a/stable/anchore-admission-controller/templates/_helpers.tpl +++ b/stable/anchore-admission-controller/templates/_helpers.tpl @@ -12,7 +12,7 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this */}} {{- define "anchore-admission-controller.fullname" -}} {{- $name := default 
.Chart.Name .Values.nameOverride -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- default (printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-") .Values.fullnameOverride -}} {{- end -}} {{/* @@ -29,6 +29,9 @@ Common labels app.kubernetes.io/name: {{ include "anchore-admission-controller.name" . }} helm.sh/chart: {{ include "anchore-admission-controller.chart" . }} app.kubernetes.io/instance: {{ .Release.Name }} +{{- with .Values.extraLabels}} +{{ toYaml . }} +{{- end }} {{- if .Chart.AppVersion }} app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} {{- end }} diff --git a/stable/anchore-admission-controller/templates/deployment.yaml b/stable/anchore-admission-controller/templates/deployment.yaml index 4eb67227..6c59d50a 100644 --- a/stable/anchore-admission-controller/templates/deployment.yaml +++ b/stable/anchore-admission-controller/templates/deployment.yaml @@ -3,6 +3,10 @@ kind: Deployment metadata: name: {{ template "anchore-admission-controller.fullname" . }} labels: {{- include "anchore-admission-controller.labels" . | nindent 4 }} + annotations: + {{- with .Values.annotations }} + {{ toYaml . | nindent 4 }} + {{- end }} spec: replicas: {{ .Values.replicaCount }} selector: @@ -12,6 +16,10 @@ spec: template: metadata: labels: {{- include "anchore-admission-controller.labels" . | nindent 8 }} + annotations: + {{- with .Values.annotations }} + {{ toYaml . | nindent 8 }} + {{- end }} spec: serviceAccountName: {{ template "anchore-admission-controller.fullname" . }} volumes: @@ -25,16 +33,22 @@ spec: - name: anchore-auth secret: secretName: {{ if .Values.existingCredentialsSecret }}{{ .Values.existingCredentialsSecret }}{{ else }}{{ template "anchore-admission-controller.fullname" . }}{{ end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + {{- range .Values.imagePullSecrets }} + - name: {{ . 
}} + {{- end }} + {{- end }} containers: - name: {{ .Chart.Name }} image: "{{ .Values.image }}" imagePullPolicy: {{ .Values.imagePullPolicy }} command: - - "/anchore-kubernetes-admission-controller" + - "/ko-app/kubernetes-admission-controller" - "--audit-log-path=-" - "--tls-cert-file=/var/serving-cert/tls.crt" - "--tls-private-key-file=/var/serving-cert/tls.key" - - "--v={{ .Values.logVerbosity }}" + - "-v{{ .Values.logVerbosity }}" - "--secure-port={{ .Values.service.internalPort }}" ports: - containerPort: {{ .Values.service.internalPort }} @@ -57,12 +71,15 @@ spec: - name: CREDENTIALS_FILE_PATH value: /credentials/credentials.json resources: {{- toYaml .Values.resources | nindent 12 }} - {{- if .Values.nodeSelector }} - nodeSelector: {{- toYaml .Values.nodeSelector | nindent 8 }} - {{- end }} - {{- if .Values.tolerations }} - tolerations: {{- toYaml .Values.tolerations | nindent 8 }} - {{- end }} - {{- if .Values.affinity }} - affinity: {{- toYaml .Values.affinity | nindent 8 }} - {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{- toYaml .Values.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- toYaml .Values.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.affinity }} + affinity: {{- toYaml .Values.affinity | nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} diff --git a/stable/anchore-admission-controller/templates/init-ca/init-ca-hook.yaml b/stable/anchore-admission-controller/templates/init-ca/init-ca-hook.yaml index 7a4a6e4c..3adfbe26 100644 --- a/stable/anchore-admission-controller/templates/init-ca/init-ca-hook.yaml +++ b/stable/anchore-admission-controller/templates/init-ca/init-ca-hook.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: name: "{{ .Release.Name }}-init-ca" - labels: + labels: {{- include "anchore-admission-controller.labels" . 
| nindent 4 }} app: {{ template "anchore-admission-controller.fullname" . }} component: admission-server annotations: @@ -11,7 +11,7 @@ metadata: spec: template: metadata: - labels: + labels: {{- include "anchore-admission-controller.labels" . | nindent 8 }} app: {{ template "anchore-admission-controller.fullname" . }} component: admission-server spec: @@ -21,9 +21,15 @@ spec: - name: init-ca-script configMap: name: {{.Release.Name}}-init-ca + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + {{- range .Values.imagePullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} containers: - name: create-ca - image: "cfssl/cfssl:latest" + image: "{{ .Values.initCa.image }}" command: - bash - -xe @@ -35,4 +41,19 @@ spec: {{- with .Values.initCa.extraEnv }} {{- toYaml . | nindent 8 }} {{- end }} - + {{- with .Values.initCa.resources }} + resources: + {{- toYaml . | nindent 10 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{- toYaml .Values.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- toYaml .Values.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.affinity }} + affinity: {{- toYaml .Values.affinity | nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} diff --git a/stable/anchore-admission-controller/templates/init-ca/init-ca-script.yaml b/stable/anchore-admission-controller/templates/init-ca/init-ca-script.yaml index fd59edbb..6ffc98e6 100644 --- a/stable/anchore-admission-controller/templates/init-ca/init-ca-script.yaml +++ b/stable/anchore-admission-controller/templates/init-ca/init-ca-script.yaml @@ -88,7 +88,7 @@ data: sed "s/TLS_SERVING_KEY/$(base64 ${CERT_DIR}/serving-{{ template "anchore-admission-controller.fullname" . 
}}.{{ .Release.Namespace }}.svc.key | tr -d '\n')/g" | kubectl -n {{ .Release.Namespace }} apply -f - cat > api-service.yaml <=9.6) which may be handled by the chart or supplied externally, and executes in a service-based architecture utilizing the following Anchore Engine services: External API, SimpleQueue, Catalog, Policy Engine, and Analyzer. - -This chart can also be used to install the following Anchore Enterprise services: GUI, RBAC, Reporting, Notifications & On-premises Feeds. Enterprise services require a valid Anchore Enterprise license, as well as credentials with access to the private DockerHub repository hosting the images. These are not enabled by default. +This chart deploys the Anchore Enterprise container image analysis system. Anchore requires a PostgreSQL database (>=9.6) which may be handled by the chart or supplied externally, and executes in a service-based architecture utilizing the following Anchore Enterprise services: External API, SimpleQueue, Catalog, Policy Engine, Analyzer, GUI, RBAC, Reporting, Notifications and On-premises Feeds. Enterprise services require a valid Anchore Enterprise license, as well as credentials with access to the private DockerHub repository hosting the images. These are not enabled by default. Each of these services can be scaled and configured independently. -See [Anchore Engine](https://github.com/anchore/anchore-engine) for more project details. - -## Chart Details - -The chart is split into global and service specific configurations for the OSS Anchore Engine, as well as global and services specific configurations for the Enterprise components. - -* The `anchoreGlobal` section is for configuration values required by all Anchore Engine components. -* The `anchoreEnterpriseGlobal` section is for configuration values required by all Anchore Engine Enterprise components. -* Service specific configuration values allow customization for each individual service. 
- -For a description of each component, view the official documentation at: [Anchore Enterprise Service Overview](https://docs.anchore.com/current/docs/overview/architecture/) - -## Installing the Anchore Engine Helm Chart - -### TL;DR - -```bash -helm repo add anchore https://charts.anchore.io -helm install my-release anchore/anchore-engine -``` - -Anchore Engine will take approximately three minutes to bootstrap. After the initial bootstrap period, Anchore Engine will begin a vulnerability feed sync. During this time, image analysis will show zero vulnerabilities until the sync is completed. This sync can take multiple hours depending on which feeds are enabled. The following anchore-cli command is available to poll the system and report back when the engine is bootstrapped and the vulnerability feeds are all synced up. `anchore-cli system wait` - -The recommended way to install the Anchore Engine Helm Chart is with a customized values file and a custom release name. It is highly recommended to set non-default passwords when deploying. All passwords are set to defaults specified in the chart. It is also recommended to utilize an external database, rather then using the included postgresql chart. - -Create a new file named `anchore_values.yaml` and add all desired custom values (see the following examples); then run the following command: - -#### Helm v3 installation - -```bash -helm repo add anchore https://charts.anchore.io -helm install -f anchore_values.yaml anchore/anchore-engine -``` - -##### Example anchore_values.yaml - using chart managed PostgreSQL service with custom passwords. - -*Note: Installs with chart managed PostgreSQL database. 
This is not a guaranteed production ready config.* - -```yaml -## anchore_values.yaml - -postgresql: - postgresPassword: - persistence: - size: 50Gi - -anchoreGlobal: - defaultAdminPassword: - defaultAdminEmail: -``` - -## Adding Enterprise Components +## Anchore Enterprise Components The following features are available to Anchore Enterprise customers. Please contact the Anchore team for more information about getting a license for the Enterprise features. [Anchore Enterprise Demo](https://anchore.com/demo/) @@ -76,6 +21,24 @@ anchoreGlobal: * Kubernetes runtime image inventory/scanning ``` +## Chart Details + +The chart is split into global and service specific configurations for all Anchore Enterprise components. + +* The `anchoreGlobal` section is for configuration values required by all Anchore components. +* The `anchoreEnterpriseGlobal` section is for configuration values required by all Anchore Enterprise components. +* Service specific configuration values allow customization for each individual service. + +For a description of each component, view the official documentation at: [Anchore Enterprise Service Overview](https://docs.anchore.com/current/docs/overview/architecture/) + +## Installing the Anchore Helm Chart + +Anchore will take approximately three minutes to bootstrap. After the initial bootstrap period, Anchore will begin a vulnerability feed sync. During this time, image analysis will show zero vulnerabilities until the sync is completed. This sync can take multiple hours depending on which feeds are enabled. The following anchore-cli command is available to poll the system and report back when the engine is bootstrapped and the vulnerability feeds are all synced up. `anchore-cli system wait` + +The recommended way to install the Anchore Helm Chart is with a customized values file and a custom release name. It is highly recommended to set non-default passwords when deploying. All passwords are set to defaults specified in the chart. 
It is also recommended to utilize an external database, rather then using the included postgresql chart. + +Create a new file named `anchore_values.yaml` and add all desired custom values (see the following examples); then run the following command: + ### Enabling Enterprise Services Enterprise services require an Anchore Enterprise license, as well as credentials with @@ -127,53 +90,28 @@ anchoreGlobal: enableMetrics: True anchoreEnterpriseGlobal: - enabled: True + enabled: true anchore-feeds-db: postgresPassword: persistence: size: 20Gi -anchore-ui-redis: - password: +ui-redis: + auth: + password: ``` -## Installing on OpenShift - -As of chart version 1.3.1, deployments to OpenShift are fully supported. Due to permission constraints when utilizing OpenShift, the official RHEL postgresql image must be utilized, which requires custom environment variables to be configured for compatibility with this chart. - -### Example anchore_values.yaml - deploying on OpenShift - -*Note: Installs with chart managed PostgreSQL database. 
This is not a guaranteed production ready config.* +#### Helm v3 installation -```yaml -## anchore_values.yaml +```bash +helm repo add anchore https://charts.anchore.io +helm install -f anchore_values.yaml anchore/anchore-engine +``` -postgresql: - image: registry.access.redhat.com/rhscl/postgresql-96-rhel7 - imageTag: latest - extraEnv: - - name: POSTGRESQL_USER - value: anchoreengine - - name: POSTGRESQL_PASSWORD - value: anchore-postgres,123 - - name: POSTGRESQL_DATABASE - value: anchore - - name: PGUSER - value: postgres - - name: LD_LIBRARY_PATH - value: /opt/rh/rh-postgresql96/root/usr/lib64 - - name: PATH - value: /opt/rh/rh-postgresql96/root/usr/bin:/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - postgresPassword: - persistence: - size: 50Gi +## Installing on OpenShift -anchoreGlobal: - defaultAdminPassword: - defaultAdminEmail: - openShiftDeployment: True -``` +As of chart version 1.3.1, deployments to OpenShift are fully supported. Due to permission constraints when utilizing OpenShift, the official RHEL postgresql image must be utilized, which requires custom environment variables to be configured for compatibility with this chart. 
To perform an Enterprise deployment on OpenShift, use the following anchore_values.yaml configuration @@ -189,7 +127,7 @@ postgresql: - name: POSTGRESQL_USER value: anchoreengine - name: POSTGRESQL_PASSWORD - value: anchore-postgres,123 + value: - name: POSTGRESQL_DATABASE value: anchore - name: PGUSER @@ -198,7 +136,7 @@ postgresql: value: /opt/rh/rh-postgresql96/root/usr/lib64 - name: PATH value: /opt/rh/rh-postgresql96/root/usr/bin:/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - postgresPassword: + postgresPassword: persistence: size: 20Gi @@ -207,9 +145,10 @@ anchoreGlobal: defaultAdminEmail: enableMetrics: True openShiftDeployment: True - -anchoreEnterpriseGlobal: - enabled: True + securityContext: + runAsUser: null + runAsGroup: null + fsGroup: null anchore-feeds-db: image: registry.access.redhat.com/rhscl/postgresql-96-rhel7 @@ -218,7 +157,7 @@ anchore-feeds-db: - name: POSTGRESQL_USER value: anchoreengine - name: POSTGRESQL_PASSWORD - value: anchore-postgres,123 + value: - name: POSTGRESQL_DATABASE value: anchore - name: PGUSER @@ -227,17 +166,31 @@ anchore-feeds-db: value: /opt/rh/rh-postgresql96/root/usr/lib64 - name: PATH value: /opt/rh/rh-postgresql96/root/usr/bin:/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - postgresPassword: + postgresPassword: persistence: size: 50Gi -anchore-ui-redis: - password: +ui-redis: + auth: + password: + master: + podSecurityContext: + enabled: true + fsGroup: 1000670000 + containerSecurityContext: + enabled: true + runAsUser: 1000670000 + runAsNonRoot: true + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" ``` # Chart Updates -See the anchore-engine [CHANGELOG](https://github.com/anchore/anchore-engine/blob/master/CHANGELOG.md) for updates to Anchore Engine. 
+See the Anchore [Release Notes](https://docs.anchore.com/current/docs/releasenotes/) for updates to Anchore. ## Upgrading from previous chart versions @@ -245,6 +198,154 @@ A Helm post-upgrade hook job will shut down all previously running Anchore servi The upgrade will only be considered successful when this job completes successfully. Performing an upgrade will cause the Helm client to block until the upgrade job completes and the new Anchore service pods are started. To view progress of the upgrade process, tail the logs of the upgrade jobs `anchore-engine-upgrade` and `anchore-enterprise-upgrade`. These job resources will be removed upon a successful Helm upgrade. +# Chart Version 1.28.7 + +* Anchore Enterprise image bumped to v4.9.5 - [Release Notes](https://docs.anchore.com/current/docs/releasenotes/495/) + +# Chart Version 1.28.4 + +* Anchore Enterprise image bumped to v4.9.4 - [Release Notes](https://docs.anchore.com/current/docs/releasenotes/494/) + +# Chart Version 1.28.0 + +* Updated ingress configuration to allow exposing both v1 and v2 api endpoints. +* **WARNING** this version of the chart makes breaking changes to the following Ingress values. These values have all been updated from a string to a list. + * `.Values.ingress.apiPath` -> `.Values.ingress.apiPaths` + * `.Values.ingress.feedsPath` -> `.Values.ingress.feedsPaths` + * `.Values.ingress.reportsPath` -> `.Values.ingress.reportsPaths` +* Update your values file accordingly: + + ```yaml + ingress: + apiPaths: + - /v1/ + - /v2/ + - /version/ + feedsPaths: + - /v1/feeds/ + - /v2/feeds/ + reportsPaths: + - /v1/reports/ + - /v2/reports/ + ``` + +# Chart Version 1.27.3 + +* Added option to allow nodePorts to each service created as part of an anchore deployment. 
For more information about nodePorts, see [The Kubernetes Docs](https://kubernetes.io/docs/concepts/services-networking/service/#nodeport-custom-port) + +# Chart Version 1.27.2 + +* Anchore Enterprise image updated to v4.9.1 - [Release Notes](https://docs.anchore.com/current/docs/releasenotes/491/) +* Bumped Redis chart to the latest version. +* Reverted the change in v1.27.1 to use an alias for the bitnami dependency. This dependency is now using an OCI url which is supported by the latest version of chart-releaser-action. + +# Chart Version 1.27.1 + +* Updating chart-releaser-action to v1.5.0 required using an alias for the bitnami dependency because chart-releaser doesn't currently support using a url directly in the dependency declaration. You may be required to add bitnami as an alias to the bitnami repos. eg `helm repo add bitnami https://charts.bitnami.com/bitnami` + +# Chart Version 1.27.0 + +* Anchore Enterprise image updated to v4.9.0 - [Release Notes](https://docs.anchore.com/current/docs/releasenotes/490/) + +# Chart Version 1.26.3 + +* Anchore Enterprise image updated to v4.8.1 - [Release Notes](https://docs.anchore.com/current/docs/releasenotes/481/) + +# Chart version 1.26.1 + +* Added `.Values.anchoreGlobal.usePreupgradeHook` to enable doing the enterprise and feeds upgrade jobs using a helm pre-upgrade hook. This is useful when doing helm upgrade with the --wait flag, or for ArgoCD. Enabling this option will create a service account and role with permissions to get/update/patch deployments and list pods. See templates/hooks/pre-upgrade/anchore_upgrade_role.yaml for a complete list of roles. This is disabled by default. 
+ +# Chart version 1.26.0 + +* Anchore Enterprise image updated to v4.8.0 - [Release Notes](https://docs.anchore.com/current/docs/releasenotes/480/) + +## Chart version 1.25.0 + +* Anchore Enterprise image updated to v4.7.0 - [Release Notes](https://docs.anchore.com/current/docs/releasenotes/470/) + +## Chart version 1.24.0 + +* Anchore Enterprise image updated to v4.6.0 - [Release Notes](https://docs.anchore.com/current/docs/releasenotes/460/) + +* `.Values.anchoreGlobal.doSourceAtEntry.filePath` has been changed to `.Values.anchoreGlobal.doSourceAtEntry.filePaths` which accepts a list of file paths. This allows for multiple files to be sourced prior to starting the Anchore services instead of a single file. + * Remove `.Values.anchoreGlobal.doSourceAtEntry.filePath` and add the following to your values file: + + ```yaml + anchoreGlobal: + doSourceAtEntry: + filePaths: + - /path/to/file1 + - /path/to/file2 + ``` + +* Updated the configuration for Anchore Enterprise database connections. This will ensure that special characters are handled properly in database passwords. Also allows configuring the db hostname and port separately. + + * If your postgresql connection is using a non-standard port, you will need to update your values file to include the hostname and port. For example: + + ```yaml + postgresql: + externalEndpoint: + postgresPort: + ``` + + * If you're using external secrets and an non-standard port, you will need to update your secrets to include the hostname and port. + + ```yaml + ANCHORE_DB_HOST: + ANCHORE_DB_PORT: + ``` + +## Chart version 1.23.0 + +* Anchore Enterprise image updated to v4.5.0 - [Release Notes](https://docs.anchore.com/current/docs/releasenotes/450/) + +## Chart version 1.22.0 + +* Anchore Enterprise image updated to v4.4.0 - [Release Notes](https://docs.anchore.com/current/docs/releasenotes/440/) +* Allow configuration of the URL used for pulling Ubuntu vulnerability feed. 
+* The UI now emits prometheus metrics when `.Values.anchoreGlobal.enableMetrics=true` + +## Chart version 1.21.0 + +* Revamped how the chart is configured when using existing secrets. Users upgrading from a previous chart version will need to update their values file to match the new convention. Update the following in your values file: + * Set `.Values.anchoreGlobal.useExistingSecrets=true` + * Update your existing secrets to include all environment variables used by deployments + * Add to the secret specified in `.Values.anchoreGlobal.existingSecretName`: + * ANCHORE_DB_HOST + * ANCHORE_DB_USER + * ANCHORE_DB_NAME + * Add to secret specified in `.Values.anchoreEnterpriseFeeds.existingSecretName`: + * ANCHORE_FEEDS_DB_HOST + * ANCHORE_FEEDS_DB_USER + * ANCHORE_FEEDS_DB_NAME + * Update the following keys: + * `.Values.anchoreGlobal.existingSecret` -> `.Values.anchoreGlobal.existingSecretName` + * `.Values.anchoreEnterpriseFeeds.existingSecret` -> `.Values.anchoreEnterpriseFeeds.existingSecretName` + * `.Values.anchoreEnterpriseUi.existingSecret` -> `.Values.anchoreEnterpriseUi.existingSecretName` +* See the [existing secrets section](#utilize-an-existing-secret) for more details. + +## Chart version 1.20.1 + +* Anchore Enterprise image update to v4.3.0 - [Release Notes](https://docs.anchore.com/current/docs/releasenotes/430/). +* Add configuration options for wolfi feed driver. + +## Chart version 1.20.0 + +* Anchore Enterprise image update to v4.2.0 - [Release Notes](https://docs.anchore.com/current/docs/releasenotes/420/). +* Removed embedded k8s runtime inventory configurations. + * Deletes service account, role, & rolebindigs created by `.Values.anchoreCatalog.createServiceAccount`. + * To enable cluster runtime inventory use the [Kai Helm Chart](https://github.com/anchore/anchore-charts/tree/main/stable/kai). 
+
+## Chart version 1.19.0
+
+* Redis chart updated from version 10 to 16.11.3, the latest version, as bitnami has started removing older versions of their charts.
+* redis will by default run in the `standalone` architecture.
+* `anchore-ui-redis` in the helm values should now be `ui-redis`
+  * if you've set the `password` value under `anchore-ui-redis`, you will now have to change it to `auth.password`, making the end change `ui-redis.auth.password`
+
+* WARNING: Users may be logged out from the platform after this happens since this will delete the old redis deployment and spin up a new one in its place
+  * For more information on why this is necessary, see [the breaking change here](https://github.com/bitnami/charts/tree/master/bitnami/redis/#to-1400)
+
 ## Chart version 1.18.0
 
 * Anchore Enterprise image updated to v4.0.0 - [Release Notes](https://docs.anchore.com/current/docs/releasenotes/400/)
@@ -261,7 +362,7 @@ The upgrade will only be considered successful when this job completes successfu
 
 ## Chart version 1.17.0
 
-Chart version 1.17.0 is an Enterprise focused release. Anchore Engine users will see no change in behavior from this release.
+Chart version 1.17.0 is an Enterprise focused release. Anchore users will see no change in behavior from this release.
 
 For Enterprise users, this release specifically helps reduce downtime needed during the transition from the v1 scanner to the v2 scanner. This version sets the GrypeDB driver to run in the feed service v1-scanner deployments so that the GrypeDB is ready when the update to the v2 scanner is made and thus reduces effective downtime during the maintenance window needed for that configuration change.
@@ -286,38 +387,38 @@ The impacts of this upgrade are as follows:
 
 * For deployments currently utilizing the V1 (legacy) vulnerability provider, configured with `.Values.anchorePolicyEngine.vulnerabilityProvider=legacy`, this upgrade will enable the GrypeDB Driver on the Enterprise Feeds service.
* The GrypeDB driver can be manually disabled for legacy deployments using `.Values.anchoreEnterpriseFeeds.grypeDriverEnabled=false` -* For deployments of Anchore Engine, configured with `.Values.anchoreEnterpriseGlobal=false`, this upgrade will have zero impact. +* For deployments of Anchore, configured with `.Values.anchoreEnterpriseGlobal=false`, this upgrade will have zero impact. * For Enterprise deployments currently utilizing the Grype vulnerability provider, configured with `.Values.anchorePolicyEngine.vulnerabilityProvider=grype`, this release will have zero impact. ## Chart version 1.16.0 -* Anchore Engine image updated to v1.1.0 - [Release Notes](https://engine.anchore.io/docs/releasenotes/110/) +* Anchore image updated to v1.1.0 - [Release Notes](https://engine.anchore.io/docs/releasenotes/110/) * Anchore Enterprise image updated to v3.3.0 - [Release Notes](https://docs.anchore.com/current/docs/releasenotes/330/) ## Chart version 1.15.0 Chart version v1.15.0 sets the V2 vulnerability scanner, based on [Grype](https://github.com/anchore/grype), as the default for new deployments. **Users upgrading from chart versions prior to v1.15.0 will need to explicitly set their preferred vulnerability provider using `.Values.anchorePolicyEngine.vulnerabilityProvider`.** If the vulnerability provider is not explicitly set, Helm will prevent an upgrade from being initiated. -* Anchore Engine image updated to v1.0.0 - [Release Notes](https://engine.anchore.io/docs/releasenotes/100/) +* Anchore image updated to v1.0.0 - [Release Notes](https://engine.anchore.io/docs/releasenotes/100/) * Anchore Enterprise image updated to v3.2.0 - [Release Notes](https://docs.anchore.com/current/docs/releasenotes/320/) * Enterprise Feeds - Now uses a PVC for the persistent workspace directory. This directory is used by the vulnerability drivers for downloading vulnerability data, and should be persistent for optimal performance. 
* Enterprise Feeds - When enabling the Ruby Gems vulnerability driver, the Helm chart will now spin up an ephemeral Postgresql deployment for the Feeds service to load Ruby vulnerability data. ## Chart version 1.14.0 -* Anchore Engine image updated to v0.10.1 - [Release Notes](https://engine.anchore.io/docs/releasenotes/0101/) +* Anchore image updated to v0.10.1 - [Release Notes](https://engine.anchore.io/docs/releasenotes/0101/) * Anchore Enterprise image updated to v3.1.1 - [Release Notes](https://docs.anchore.com/current/docs/releasenotes/311/) * Enterprise Feeds - MSRC feeds no longer require an access token. No changes are needed, however MSRC access tokens can now be removed from values and/or existing secrets. ## Chart version 1.13.0 -* Anchore Engine image updated to v0.10.0 - [Release Notes](https://engine.anchore.io/docs/releasenotes/0100/) +* Anchore image updated to v0.10.0 - [Release Notes](https://engine.anchore.io/docs/releasenotes/0100/) * Anchore Enterprise image updated to v3.1.0 - [Release Notes](https://docs.anchore.com/current/docs/releasenotes/310/) * If utilizing the Enterprise Runtime Inventory feature, the catalog service can now be configured to automatically setup RBAC for image discovery within the cluster. This is configured under `.Values.anchoreCatalog.runtimeInventory` ## Chart version 1.12.0 -* Anchore Engine image updated to v0.9.1 +* Anchore image updated to v0.9.1 * Anchore Enterprise images updated to v3.0.0 * Existing secrets now work for Enterprise feeds and Enterprise UI - see [existing secrets configuration](#-Utilize-an-Existing-Secret) * Anchore admin default password no longer defaults to `foobar`. If no password is specified, a random string will be generated. @@ -328,7 +429,7 @@ Chart dependency declarations have been updated to be compatible with Helm v3.4. 
## Chart version 1.8.0 -The following Anchore-Engine features were added with this version: +The following features were added with this version: * Malware scanning - see .Values.anchoreAnalyzer.configFile.malware * Binary content scanning @@ -339,183 +440,13 @@ For more details see - https://docs.anchore.com/current/docs/engine/releasenotes ## Chart version 1.7.0 -Starting with version 1.7.0, the anchore-engine chart will be hosted on charts.anchore.io. If you're upgrading from a previous version of the chart, you will need to delete your previous deployment and redeploy Anchore Engine using the chart from the Anchore Charts repository. - -This version of the chart includes the dependent Postgresql chart in the charts/ directory rather then pulling it from upstream. All apiVersions were updated for compatibility with Kubernetes v1.16+ and the postgresql image has been updated to version 9.6.18. The chart version also updates to the latest version of the Redis chart from Bitnami. These dependency updates require deleting and re-installing your chart. If the following process is performed, no data should be lost. - -## Migrating To The New Anchore Charts Repository - -For these examples, we assume that your namespace is called `my-namespace` and your Anchore installation is called `my-anchore`. - -These examples use Helm version 3 and kubectl client version 1.18, server version 1.18. - -### **ENSURE MIGRATION IS PERFORMED SEPARATELY FROM ANCHORE ENGINE UPGRADES** - -All helm installation steps will include a flag to override the Anchore Engine/Enterprise images with your current running version. You can upgrade your version of Anchore after moving to the new chart from charts.anchore.io. Record the version of your Anchore deployment and use it anytime the instructions refer to the Engine Code Version. 
- -### Determine Currently Running Anchore Version - -To determine the currently running Anchore version, connect to the anchore-api pod, issue the following command, and record the Engine Code Version: - -```bash -[anchore@anchore-api anchore-engine]$ anchore-cli system status -Service analyzer (anchore-anchore-engine-analyzer-7cd9c5cb78-j8n8p, http://anchore-anchore-engine-analyzer:8084): up -Service apiext (anchore-anchore-engine-api-54cff87fcd-s4htm, http://anchore-anchore-engine-api:8228): up -Service catalog (anchore-anchore-engine-catalog-5898dc67d6-64b8n, http://anchore-anchore-engine-catalog:8082): up -Service simplequeue (anchore-anchore-engine-simplequeue-5cc449cc5c-djkf7, http://anchore-anchore-engine-simplequeue:8083): up -Service policy_engine (anchore-anchore-engine-policy-68b99ddf96-d4gbl, http://anchore-anchore-engine-policy:8087): up - -Engine DB Version: 0.0.13 -Engine Code Version: 0.7.2 -``` +Starting with version 1.7.0, the anchore-engine chart will be hosted on charts.anchore.io. If you're upgrading from a previous version of the chart, you will need to delete your previous deployment and redeploy Anchore using the chart from the Anchore Charts repository. -## If Using An External Postgresql Database (not included as chart dependency) - -```bash -helm uninstall --namespace=my-namespace my-anchore -helm repo add anchore https://charts.anchore.io -helm repo update -export ANCHORE_VERSION=0.7.2 # USE YOUR ENGINE CODE VERSION HERE -helm install --namespace=my-namespace --set anchoreGlobal.image=docker.io/anchore/anchore-engine:v${ANCHORE_VERSION} --set anchoreEnterpriseGlobal.image=docker.io/anchore/enterprise:v${ANCHORE_VERSION} -f anchore_values.yaml my-anchore anchore/anchore-engine -``` - -## If Using The Included Postgresql Chart - -When utilizing the included Postgresql chart, you will need to reuse the persistent volume claims that are attached to your current deployment. 
These existing claims will be utilized when re-installing anchore-engine using the new chart from charts.anchore.io. - -### Determine Your Database PersistentVolumeClaim - -Find the name of the database PersistentVolumeClaim using `kubectl`: - -```bash -$ kubectl get persistentvolumeclaim --namespace my-namespace -NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE -my-anchore-postgresql Bound pvc-739f6f21-b73b-11ea-a2b9-42010a800176 20Gi RWO standard 2d -``` - -The name of your PersistentVolumeClaim in the example shown is `my-anchore-postgresql`. Note that, as you will need it later. - -Anchore Enterprise users with a standalone Feeds Service will see a different set of PersistentVolumeClaims: - -```bash -$ kubectl get persistentvolumeclaim --namespace my-namespace -NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE -my-anchore-anchore-feeds-db Bound pvc-cd7ebb6f-bbe0-11ea-b9bf-42010a800020 20Gi RWO standard 3d -my-anchore-postgresql Bound pvc-cd7dc7d2-bbe0-11ea-b9bf-42010a800020 20Gi RWO standard 3d -``` - -The names of the PersistentVolumeClaims in the example shown are `my-anchore-anchore-feeds-db` and `my-anchore-postgresql`. You may see other persistent volume claims, but only `my-anchore-anchore-feeds-db` and `my-anchore-postgresql` are relevant for this migration. Remember the names, as you will need them later. - -#### Uninstall Your Anchore Installation With Helm - -```bash -$ helm uninstall --namespace=my-namespace my-anchore -release "my-anchore" uninstalled -``` - -Anchore Enterprise users will want to remove the Redis DB PersistentVolumeClaim. This will delete all current session data but will not affect stability of the deployment: - -```bash -kubectl delete pvc redis-data-my-anchore-anchore-ui-redis-master-0 -``` - -Your other PersistentVolumeClaims will still be resident in your cluster (we're showing results from an Anchore Enterprise installation that has a standalone Feeds Service below. 
Anchore Enterprise users without a standalone Feeds Service, and Anchore Engine users will not see `my-anchore-anchore-feeds-db`): - -```bash -$ kubectl get persistentvolumeclaim --namespace my-namespace -NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE -my-anchore-anchore-feeds-db Bound pvc-a22abf70-bbb9-11ea-840b-42010a8001d8 20Gi RWO standard 3d -my-anchore-postgresql Bound pvc-e6daf90a-bbb8-11ea-840b-42010a8001d8 20Gi RWO standard 3d -``` - -#### Add The New Anchore Helm Chart Repository - -```bash -$ helm repo add anchore https://charts.anchore.io -"anchore" has been added to your repositories - -$ helm repo update -Hang tight while we grab the latest from your chart repositories... -...Successfully got an update from the "anchore" chart repository -``` - -#### Install The Anchore Helm Chart - -Update your anchore_values.yaml file as shown, using the PersistentVolumeClaim values from above: - -Engine only deployment values file example: - -```yaml -# anchore_values.yaml - - postgresql: - persistence: - existingclaim: my-anchore-postgresql -``` - -Enterprise deployment values file example: - -```yaml -# anchore_values.yaml - -postgresql: - persistence: - existingclaim: my-anchore-postgresql - -anchore-feeds-db: - persistence: - existingclaim: my-anchore-anchore-feeds-db -``` - -Install a new Anchore Engine deployment using the chart from charts.anchore.io - -```bash -$ export ANCHORE_VERSION=0.7.2 # USE YOUR ENGINE CODE VERSION HERE -$ helm install --namespace=my-namespace --set anchoreGlobal.image=docker.io/anchore/anchore-engine:v${ANCHORE_VERSION} --set anchoreEnterpriseGlobal.image=docker.io/anchore/enterprise:v${ANCHORE_VERSION} -f anchore_values.yaml my-anchore anchore/anchore-engine - -NAME: my-anchore -LAST DEPLOYED: Thu Jun 25 12:25:33 2020 -NAMESPACE: my-namespace -STATUS: deployed -REVISION: 1 -TEST SUITE: None -NOTES: -To use Anchore Engine you need the URL, username, and password to access the API. -...more instructions... 
-``` - -Verify that your PersistentVolumeClaims are bound (output may vary): - -```bash -$ kubectl get persistentvolumeclaim --namespace my-namespace -NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE -my-anchore-anchore-feeds-db Bound pvc-a22abf70-bbb9-11ea-840b-42010a8001d8 20Gi RWO standard 3d -my-anchore-postgresql Bound pvc-e6daf90a-bbb8-11ea-840b-42010a8001d8 20Gi RWO standard 3d -``` - -Connect to the anchore-api pod and validate that your installation still contains all of your previously scanned images. - -```bash -[anchore@anchore-api anchore-engine]$ anchore-cli image list -Full Tag Image Digest Analysis Status -docker.io/alpine:latest sha256:a15790640a6690aa1730c38cf0a440e2aa44aaca9b0e8931a9f2b0d7cc90fd65 analyzed -docker.io/anchore/anchore-engine:latest sha256:624c9f662233838d1046809135a70ab88d79bd0f2e53dd74bb3d67d10d997bd1 analyzed -docker.io/ubuntu:latest sha256:60f560e52264ed1cb7829a0d59b1ee7740d7580e0eb293aca2d722136edb1e24 analyzed -``` - -You are now running Anchore from the new chart repository, with your data in place. - -## Upgrade To Latest Version of Anchore - -Now that you're migrated to charts.anchore.io, you can upgrade Anchore Engine to the latest version if desired. - -```bash -helm upgrade --namespace my-namespace -f anchore_values.yaml my-anchore anchore/anchore-engine -``` +This version of the chart includes the dependent Postgresql chart in the charts/ directory rather then pulling it from upstream. All apiVersions were updated for compatibility with Kubernetes v1.16+ and the postgresql image has been updated to version 9.6.18. The chart version also updates to the latest version of the Redis chart from Bitnami. These dependency updates require deleting and re-installing your chart. # Configuration -All configurations should be appended to your custom `anchore_values.yaml` file and utilized when installing the chart. 
While the configuration options of Anchore Engine are extensive, the options provided by the chart are as follows: +All configurations should be appended to your custom `anchore_values.yaml` file and utilized when installing the chart. While the configuration options of Anchore are extensive, the options provided by the chart are as follows: ## Exposing the service outside the cluster using Ingress @@ -536,10 +467,23 @@ ingress: annotations: kubernetes.io/ingress.class: alb alb.ingress.kubernetes.io/scheme: internet-facing - apiPath: /v1/* + apiPaths: + - /v1/* + - /v2/* + - /version/* + feedsPaths: + - /v1/feeds/* + - /v2/feeds/* + reportsPaths: + - /v1/reports/* + - /v2/reports/* uiPath: /* apiHosts: - anchore-api.example.com + feedsHosts: + - anchore-feeds.example.com + reportsHosts: + - anchore-reports.example.com uiHosts: - anchore-ui.example.com @@ -547,8 +491,16 @@ anchoreApi: service: type: NodePort +anchoreEnterpriseFeeds: + service: + type: NodePort + +anchoreEnterpriseReports: + service: + type: NodePort + anchoreEnterpriseUi: - service + service: type: NodePort ``` @@ -559,10 +511,23 @@ ingress: enabled: true annotations: kubernetes.io/ingress.class: gce - apiPath: /v1/* + apiPaths: + - /v1/* + - /v2/* + - /version/* + feedsPaths: + - /v1/feeds/* + - /v2/feeds/* + reportsPaths: + - /v1/reports/* + - /v2/reports/* uiPath: /* apiHosts: - anchore-api.example.com + feedsHosts: + - anchore-feeds.example.com + reportsHosts: + - anchore-reports.example.com uiHosts: - anchore-ui.example.com @@ -570,8 +535,16 @@ anchoreApi: service: type: NodePort +anchoreEnterpriseFeeds: + service: + type: NodePort + +anchoreEnterpriseReports: + service: + type: NodePort + anchoreEnterpriseUi: - service + service: type: NodePort ``` @@ -585,29 +558,69 @@ anchoreApi: ## Utilize an Existing Secret -Secrets should be created prior to running `helm install`. 
These can be used to override the secret provisioned by the Helm chart, preventing plain text passwords in your values.yaml file. +Rather than passing secrets into the Helm values file directly, users can create secrets in the namespace prior to deploying this Helm chart. When using existing secrets, the chart will load environment variables into deployments from the secret names specified by the following values: + +* `.Values.anchoreGlobal.existingSecretName` [default: anchore-engine-env] +* `.Values.anchoreEnterpriseFeeds.existingSecretName` [default: anchore-enterprise-feeds-env] +* `.Values.anchoreEnterpriseUi.existingSecretName` [default: anchore-enterprise-ui-env] + +To use existing secrets, set the following in your values file: ```yaml anchoreGlobal: - # The secret should define the following environment vars: - # ANCHORE_ADMIN_PASSWORD - # ANCHORE_DB_PASSWORD - # ANCHORE_SAML_SECRET (if applicable) - existingSecret: "anchore-engine-secrets" - + useExistingSecrets: true +``` -anchoreEnterpriseFeeds: - # The secret should define the following environment vars: - # ANCHORE_ADMIN_PASSWORD - # ANCHORE_FEEDS_DB_PASSWORD - # ANCHORE_SAML_SECRET (if applicable) - existingSecret: "anchore-feeds-secrets" - -anchoreEnterpriseUI: - # This secret should define the following ENV vars - # ANCHORE_APPDB_URI - # ANCHORE_REDIS_URI - existingSeccret: "anchore-ui-secrets" +Create the following secrets: +```yaml +# These secrets will work as-is when using helm deployed redis/postgresql with the default chart values and a helm release name of `anchore`. When utilizing these secrets, users are expected to update the environment variables with appropriate configurations for their environment. 
+
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: anchore-engine-env
+type: Opaque
+stringData:
+  ANCHORE_ADMIN_PASSWORD: foobar1234
+  ANCHORE_DB_NAME: anchore
+  ANCHORE_DB_USER: anchoreengine
+  ANCHORE_DB_HOST: anchore-postgresql
+  ANCHORE_DB_PORT: "5432"
+  ANCHORE_DB_PASSWORD: anchore-postgres,123
+  # (if applicable) ANCHORE_SAML_SECRET: foobar,saml1234
+
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: anchore-enterprise-feeds-env
+type: Opaque
+stringData:
+  ANCHORE_ADMIN_PASSWORD: foobar1234
+  ANCHORE_FEEDS_DB_NAME: anchore-feeds
+  ANCHORE_FEEDS_DB_USER: anchoreengine
+  ANCHORE_FEEDS_DB_PASSWORD: anchore-postgres,123
+  ANCHORE_FEEDS_DB_HOST: anchore-anchore-feeds-db
+  ANCHORE_FEEDS_DB_PORT: "5432"
+  # (if applicable) ANCHORE_SAML_SECRET: foobar,saml1234
+  # (if applicable) ANCHORE_GITHUB_TOKEN: foobar,github1234
+  # (if applicable) ANCHORE_NVD_API_KEY: foobar,nvd1234
+  # (if applicable) ANCHORE_GEM_DB_NAME: anchore-gems
+  # (if applicable) ANCHORE_GEM_DB_USER: anchoregemsuser
+  # (if applicable) ANCHORE_GEM_DB_PASSWORD: foobar1234
+  # (if applicable) ANCHORE_GEM_DB_HOST: anchorefeeds-gem-db.example.com:5432
+
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: anchore-enterprise-ui-env
+type: Opaque
+stringData:
+  # if using TLS to connect to Postgresql you must add the ?ssl=[require|verify-ca|verify-full] parameter to the end of the URI
+  ANCHORE_APPDB_URI: postgresql://anchoreengine:anchore-postgres,123@anchore-postgresql:5432/anchore
+  ANCHORE_REDIS_URI: redis://nouser:anchore-redis,123@anchore-ui-redis-master:6379
 ```
 
 ## Install using an existing/external PostgreSQL instance
@@ -657,7 +670,7 @@ cloudsql:
 
 *Note: it is recommended to use an external archive driver for production installs.*
 
-The archive subsystem of Anchore Engine is what stores large JSON documents, and can consume substantial storage if
+The archive subsystem of Anchore is what stores large JSON documents, and can consume substantial storage if
A general rule for storage provisioning is 10MB per image analyzed, so with thousands of analyzed images, you may need many gigabytes of storage. The Archive drivers now support other backends than just postgresql, so you can leverage external and scalable storage systems and keep the postgresql storage usage to a much lower level. @@ -740,7 +753,7 @@ anchoreCatalog: ```yaml anchoreCatalog: archive: - storage_driver: + storage_driver: name: swift config: auth_version: '2' @@ -779,7 +792,7 @@ This is the default archive driver and requires no additional configuration. ## Prometheus Metrics -Anchore Engine supports exporting prometheus metrics form each container. Do the following to enable metrics: +Anchore supports exporting prometheus metrics form each container. Do the following to enable metrics: ```yaml anchoreGlobal: @@ -791,12 +804,12 @@ know about each pod, and the ports it provides to scrape the metrics. ## Using custom certificates -A secret needs to be created in the same namespace as the anchore-engine chart installation. This secret should contain all custom certs, including CA certs & any certs used for internal TLS communication. -This secret will be mounted to all anchore-engine pods at /home/anchore/certs to be utilized by the system. +A secret needs to be created in the same namespace as the anchore-engine chart installation. This secret should contain all custom certs, including CA certs & any certs used for internal TLS communication. +This secret will be mounted to all Anchore pods at /home/anchore/certs to be utilized by the system. ## Event Notifications -Anchore Engine in v0.2.3 introduces a new events subsystem that exposes system-wide events via both a REST api as well +Anchore in v0.2.3 introduces a new events subsystem that exposes system-wide events via both a REST api as well as via webhooks. The webhooks support filtering to ensure only certain event classes result in webhook calls to help limit the volume of calls if you desire. 
Events, and all webhooks, are emitted from the core components, so configuration is done in the coreConfig. diff --git a/stable/anchore-engine/enterprise_values.yaml b/stable/anchore-engine/enterprise_values.yaml index 2d29b42b..5251187e 100644 --- a/stable/anchore-engine/enterprise_values.yaml +++ b/stable/anchore-engine/enterprise_values.yaml @@ -1,4 +1,2 @@ anchoreEnterpriseGlobal: enabled: true - - diff --git a/stable/anchore-engine/templates/NOTES.txt b/stable/anchore-engine/templates/NOTES.txt index 97204862..b324f121 100644 --- a/stable/anchore-engine/templates/NOTES.txt +++ b/stable/anchore-engine/templates/NOTES.txt @@ -1,61 +1,51 @@ -To use Anchore Engine you need the URL, username, and password to access the API. -Anchore Engine can be accessed via port {{ .Values.anchoreApi.service.port }} on the following DNS name from within the cluster: -{{ template "anchore-engine.api.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local +To use Anchore you need the URL, username, and password to access the API and/or the UI. +Anchore can be accessed via port {{ .Values.anchoreApi.service.port }} on the following DNS name from within the cluster: -Here are the steps to configure the anchore-cli (`pip install anchorecli`). Use these same values for direct API access as well. + {{ template "anchore-engine.api.serviceName" . }}.{{ .Release.Namespace }}.svc.cluster.local -To configure your anchore-cli run: - - ANCHORE_CLI_USER=admin - ANCHORE_CLI_PASS=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "anchore-engine.fullname" . }}-admin-pass -o jsonpath="{.data.ANCHORE_ADMIN_PASSWORD}" | base64 --decode; echo) -{{ if .Values.ingress.enabled }} - ANCHORE_CLI_URL={{- if .Values.anchoreGlobal.internalServicesSsl.enabled -}}https{{- else }}http{{- end -}}://$(kubectl get ingress --namespace {{ .Release.Namespace }} {{ template "anchore-engine.fullname" . 
}} -o jsonpath="{.status.loadBalancer.ingress[0].ip}")/v1/ -{{ else }} -Using the service endpoint from within the cluster you can use: - ANCHORE_CLI_URL={{- if .Values.anchoreGlobal.internalServicesSsl.enabled -}}https{{- else }}http{{- end -}}://{{ template "anchore-engine.api.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local:{{ .Values.anchoreApi.service.port}}/v1/ -{{ end }} - -To verify the service is up and running, you can run container for the Anchore Engine CLI: - - kubectl run -i --tty anchore-cli --restart=Always --image anchore/engine-cli {{ if and (not .Values.anchoreGlobal.internalServicesSsl.verifyCerts) .Values.anchoreGlobal.internalServicesSsl.enabled -}}--env ANCHORE_CLI_SSL_VERIFY=n{{- end }} --env ANCHORE_CLI_USER=admin --env ANCHORE_CLI_PASS=${ANCHORE_CLI_PASS} --env ANCHORE_CLI_URL=http://{{ template "anchore-engine.api.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local:{{ .Values.anchoreApi.service.port}}/v1/ - -from within the container you can use 'anchore-cli' commands. - -* NOTE: On first startup of anchore-engine, it performs a CVE data sync which may take several minutes to complete. During this time the system status will report 'partially_down' and any images added for analysis will stay in the 'not_analyzed' state. +* NOTE: On first startup of Anchore, the policy-engine performs a CVE data sync which may take several minutes to complete. +During this time the system status will report 'partially_down' and any images added for analysis will stay in the 'not_analyzed' state. Once the sync is complete, any queued images will be analyzed and the system status will change to 'all_up'. -Initial setup time can be >120sec for postgresql setup and readiness checks to pass for the services as indicated by pod state. You can check with: - kubectl get pods -l app={{ template "anchore-engine.fullname" .}},component=api - - -A quick primer on using the Anchore Engine CLI follows. 
For more info see: https://github.com/anchore/anchore-engine/wiki/Getting-Started - -View system status: +Initial setup time can be >120sec for postgresql setup and readiness checks to pass for the services as indicated by pod state. +You can check with: - anchore-cli system status - -Add an image to be analyzed: - - anchore-cli image add + kubectl get pods -l app={{ template "anchore-engine.fullname" .}},component=api -List images and see the analysis status (not_analyzed initially): +{{- if and .Release.IsUpgrade (regexMatch "1.22.[0-9]+" .Chart.Version) }} +{{- $apiDeployment := (lookup "apps/v1" "Deployment" .Release.Namespace (include "anchore-engine.api.fullname" .)) }} +{{- if not $apiDeployment }} - anchore-cli image list +**WARNING** +Anchore Enterprise v4.4.x only supports upgrades from Enterprise v4.2.0 and higher. +See release notes for more information - https://docs.anchore.com/current/docs/releasenotes/440/ +{{- end }} +{{- end }} -Once the image is analyzed you'll see status change to 'analyzed'. This may take some time on first execution with a new database because -the system must first do a CVE data sync which can take several minutes. Once complete, the image will transition to 'analyzing' state. +{{- if not .Values.anchoreEnterpriseGlobal.enabled }} -When the image reaches 'analyzed' state, you can view policy evaluation output with: +**WARNING** +As of January 2023, Anchore Engine is no longer maintained. +There will be no future versions released. Users are advised to use Syft and Grype. +{{- end }} - anchore-cli evaluate check +{{- if and .Release.IsUpgrade .Values.anchoreGlobal.useExistingSecrets (regexMatch "1.24.[0-9]+" .Chart.Version) }} -List CVEs found in the image with: +**WARNING** +Please update your existing secrets to use separate environment variables for the database hostname and port. 
+Add the following variables to your secrets: +  + ANCHORE_DB_HOST: + ANCHORE_DB_PORT: +{{- end }} - anchore-cli image vuln os +{{- if and .Release.IsUpgrade .Values.anchoreEnterpriseGlobal.enabled }} -List OS packages found in the image with: - anchore-cli image content os +**WARNING** +The upcoming 5.0 release of Anchore Enterprise will require Postgresql version 13.0 or higher. +To verify that your Postgresql version is compatible with Anchore Enterprise v5.0 view the log output of the upgrade jobs: -List files found in the image with: - anchore-cli image content files + kubectl logs -l component=anchore-enterprise-upgrade | grep WARN + kubectl logs -l component=anchore-enterprise-feeds-upgrade | grep WARN +{{- end }} diff --git a/stable/anchore-engine/templates/_helpers.tpl b/stable/anchore-engine/templates/_helpers.tpl index 418a0ec3..62d1396d 100755 --- a/stable/anchore-engine/templates/_helpers.tpl +++ b/stable/anchore-engine/templates/_helpers.tpl @@ -12,7 +12,7 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this */}} {{- define "anchore-engine.fullname" -}} {{- $name := default .Chart.Name .Values.nameOverride -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- default (printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-") .Values.fullnameOverride -}} {{- end -}} {{/* @@ -24,6 +24,30 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this {{- printf "%s-%s-%s" .Release.Name $name "analyzer"| trunc 63 | trimSuffix "-" -}} {{- end -}} +{{/* +Set the appropriate kubernetes service name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "anchore-engine.analyzer.serviceName" -}} +{{- if .Values.anchoreAnalyzer.service.name }} + {{- print .Values.anchoreAnalyzer.service.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} + {{- include "anchore-engine.analyzer.fullname" . 
-}} +{{- end -}} +{{- end -}} + +{{/* +Set the appropriate kubernetes service account name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "anchore-engine.analyzer.serviceAccountName" -}} +{{- if .Values.anchoreAnalyzer.serviceAccountName }} + {{- print .Values.anchoreAnalyzer.serviceAccountName | trunc 63 | trimSuffix "-" -}} +{{- else if .Values.anchoreGlobal.serviceAccountName -}} + {{- print .Values.anchoreGlobal.serviceAccountName | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + {{/* Create a default fully qualified app name. We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). @@ -33,6 +57,30 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this {{- printf "%s-%s-%s" .Release.Name $name "catalog"| trunc 63 | trimSuffix "-" -}} {{- end -}} +{{/* +Set the appropriate kubernetes service name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "anchore-engine.catalog.serviceName" -}} +{{- if .Values.anchoreCatalog.service.name }} + {{- print .Values.anchoreCatalog.service.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} + {{- include "anchore-engine.catalog.fullname" . -}} +{{- end -}} +{{- end -}} + +{{/* +Set the appropriate kubernetes service account name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "anchore-engine.catalog.serviceAccountName" -}} +{{- if .Values.anchoreCatalog.serviceAccountName }} + {{- print .Values.anchoreCatalog.serviceAccountName | trunc 63 | trimSuffix "-" -}} +{{- else if .Values.anchoreGlobal.serviceAccountName -}} + {{- print .Values.anchoreGlobal.serviceAccountName | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + {{/* Create a default fully qualified app name. 
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). @@ -42,6 +90,30 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this {{- printf "%s-%s-%s" .Release.Name $name "api"| trunc 63 | trimSuffix "-" -}} {{- end -}} +{{/* +Set the appropriate kubernetes service name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "anchore-engine.api.serviceName" -}} +{{- if .Values.anchoreApi.service.name }} + {{- print .Values.anchoreApi.service.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} + {{- include "anchore-engine.api.fullname" . -}} +{{- end -}} +{{- end -}} + +{{/* +Set the appropriate kubernetes service account name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "anchore-engine.api.serviceAccountName" -}} +{{- if .Values.anchoreApi.serviceAccountName }} + {{- print .Values.anchoreApi.serviceAccountName | trunc 63 | trimSuffix "-" -}} +{{- else if .Values.anchoreGlobal.serviceAccountName -}} + {{- print .Values.anchoreGlobal.serviceAccountName | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + {{/* Create a default fully qualified app name. We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). @@ -51,6 +123,30 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this {{- printf "%s-%s-%s" .Release.Name $name "policy"| trunc 63 | trimSuffix "-" -}} {{- end -}} +{{/* +Set the appropriate kubernetes service name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+*/}} +{{- define "anchore-engine.policy-engine.serviceName" -}} +{{- if .Values.anchorePolicyEngine.service.name }} + {{- print .Values.anchorePolicyEngine.service.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} + {{- include "anchore-engine.policy-engine.fullname" . -}} +{{- end -}} +{{- end -}} + +{{/* +Set the appropriate kubernetes service account name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "anchore-engine.policy-engine.serviceAccountName" -}} +{{- if .Values.anchorePolicyEngine.serviceAccountName }} + {{- print .Values.anchorePolicyEngine.serviceAccountName | trunc 63 | trimSuffix "-" -}} +{{- else if .Values.anchoreGlobal.serviceAccountName -}} + {{- print .Values.anchoreGlobal.serviceAccountName | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + {{/* Create a default fully qualified app name. We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). @@ -60,6 +156,30 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this {{- printf "%s-%s-%s" .Release.Name $name "simplequeue"| trunc 63 | trimSuffix "-" -}} {{- end -}} +{{/* +Set the appropriate kubernetes service name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "anchore-engine.simplequeue.serviceName" -}} +{{- if .Values.anchoreSimpleQueue.service.name }} + {{- print .Values.anchoreSimpleQueue.service.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} + {{- include "anchore-engine.simplequeue.fullname" . -}} +{{- end -}} +{{- end -}} + +{{/* +Set the appropriate kubernetes service account name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+*/}} +{{- define "anchore-engine.simplequeue.serviceAccountName" -}} +{{- if .Values.anchoreSimpleQueue.serviceAccountName }} + {{- print .Values.anchoreSimpleQueue.serviceAccountName | trunc 63 | trimSuffix "-" -}} +{{- else if .Values.anchoreGlobal.serviceAccountName -}} + {{- print .Values.anchoreGlobal.serviceAccountName | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + {{/* Create a default fully qualified app name. We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). @@ -78,6 +198,30 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this {{- printf "%s-%s-%s" .Release.Name $name "enterprise-ui"| trunc 63 | trimSuffix "-" -}} {{- end -}} +{{/* +Set the appropriate kubernetes service name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "anchore-engine.enterprise-ui.serviceName" -}} +{{- if .Values.anchoreEnterpriseUi.service.name }} + {{- print .Values.anchoreEnterpriseUi.service.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} + {{- include "anchore-engine.enterprise-ui.fullname" . -}} +{{- end -}} +{{- end -}} + +{{/* +Set the appropriate kubernetes service account name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "anchore-engine.enterprise-ui.serviceAccountName" -}} +{{- if .Values.anchoreEnterpriseUi.serviceAccountName }} + {{- print .Values.anchoreEnterpriseUi.serviceAccountName | trunc 63 | trimSuffix "-" -}} +{{- else if .Values.anchoreGlobal.serviceAccountName -}} + {{- print .Values.anchoreGlobal.serviceAccountName | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + {{/* Create a default fully qualified app name. We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
@@ -87,6 +231,30 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this {{- printf "%s-%s-%s" .Release.Name $name "enterprise-feeds"| trunc 63 | trimSuffix "-" -}} {{- end -}} +{{/* +Set the appropriate kubernetes service name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "anchore-engine.enterprise-feeds.serviceName" -}} +{{- if .Values.anchoreEnterpriseFeeds.service.name }} + {{- print .Values.anchoreEnterpriseFeeds.service.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} + {{- include "anchore-engine.enterprise-feeds.fullname" . -}} +{{- end -}} +{{- end -}} + +{{/* +Set the appropriate kubernetes service account name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "anchore-engine.enterprise-feeds.serviceAccountName" -}} +{{- if .Values.anchoreEnterpriseFeeds.serviceAccountName }} + {{- print .Values.anchoreEnterpriseFeeds.serviceAccountName | trunc 63 | trimSuffix "-" -}} +{{- else if .Values.anchoreGlobal.serviceAccountName -}} + {{- print .Values.anchoreGlobal.serviceAccountName | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + {{/* Create a default fully qualified app name. We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). @@ -96,6 +264,30 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this {{- printf "%s-%s-%s" .Release.Name $name "enterprise-reports"| trunc 63 | trimSuffix "-" -}} {{- end -}} +{{/* +Set the appropriate kubernetes service name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+*/}} +{{- define "anchore-engine.enterprise-reports.serviceName" -}} +{{- if .Values.anchoreEnterpriseReports.service.name }} + {{- print .Values.anchoreEnterpriseReports.service.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} + {{- include "anchore-engine.enterprise-reports.fullname" . -}} +{{- end -}} +{{- end -}} + +{{/* +Set the appropriate kubernetes service account name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "anchore-engine.enterprise-reports.serviceAccountName" -}} +{{- if .Values.anchoreEnterpriseReports.serviceAccountName }} + {{- print .Values.anchoreEnterpriseReports.serviceAccountName | trunc 63 | trimSuffix "-" -}} +{{- else if .Values.anchoreGlobal.serviceAccountName -}} + {{- print .Values.anchoreGlobal.serviceAccountName | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + {{/* Create a default fully qualified app name. We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). @@ -105,6 +297,30 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this {{- printf "%s-%s-%s" .Release.Name $name "enterprise-notifications"| trunc 63 | trimSuffix "-" -}} {{- end -}} +{{/* +Set the appropriate kubernetes service name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "anchore-engine.enterprise-notifications.serviceName" -}} +{{- if .Values.anchoreEnterpriseNotifications.service.name }} + {{- print .Values.anchoreEnterpriseNotifications.service.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} + {{- include "anchore-engine.enterprise-notifications.fullname" . -}} +{{- end -}} +{{- end -}} + +{{/* +Set the appropriate kubernetes service account name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+*/}} +{{- define "anchore-engine.enterprise-notifications.serviceAccountName" -}} +{{- if .Values.anchoreEnterpriseNotifications.serviceAccountName }} + {{- print .Values.anchoreEnterpriseNotifications.serviceAccountName | trunc 63 | trimSuffix "-" -}} +{{- else if .Values.anchoreGlobal.serviceAccountName -}} + {{- print .Values.anchoreGlobal.serviceAccountName | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + {{/* Create a default fully qualified app name. We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). @@ -114,6 +330,30 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this {{- printf "%s-%s-%s" .Release.Name $name "enterprise-rbac"| trunc 63 | trimSuffix "-" -}} {{- end -}} +{{/* +Set the appropriate kubernetes service name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "anchore-engine.enterprise-rbac.serviceName" -}} +{{- if .Values.anchoreEnterpriseRbac.service.name }} + {{- print .Values.anchoreEnterpriseRbac.service.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} + {{- include "anchore-engine.enterprise-rbac.fullname" . -}} +{{- end -}} +{{- end -}} + +{{/* +Set the appropriate kubernetes service account name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "anchore-engine.enterprise-rbac.serviceAccountName" -}} +{{- if .Values.anchoreEnterpriseRbac.serviceAccountName }} + {{- print .Values.anchoreEnterpriseRbac.serviceAccountName | trunc 63 | trimSuffix "-" -}} +{{- else if .Values.anchoreGlobal.serviceAccountName -}} + {{- print .Values.anchoreGlobal.serviceAccountName | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + {{/* Create a default fully qualified dependency name for the db. We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
@@ -143,7 +383,7 @@ Create a default fully qualified dependency name for the db. We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). */}} {{- define "redis.fullname" -}} -{{- printf "%s-%s" .Release.Name "anchore-ui-redis" | trunc 63 | trimSuffix "-" -}} +{{- printf "%s-%s" .Release.Name "ui-redis" | trunc 63 | trimSuffix "-" -}} {{- end -}} {{/* @@ -158,15 +398,96 @@ Return Anchore Engine default admin password {{- end -}} {{/* -Create database hostname string from supplied values file. Used for the enterprise-ui ANCHORE_APPDB_URI environment variable secret +Create feeds database hostname string from supplied values file. Used for setting the ANCHORE_FEEDS_DB_HOST env var in the Feeds secret. +*/}} +{{- define "feeds-db-hostname" }} + {{- if and (index .Values "anchore-feeds-db" "externalEndpoint") (not (index .Values "anchore-feeds-db" "enabled")) }} + {{- print ( index .Values "anchore-feeds-db" "externalEndpoint" ) }} + {{- else if and (index .Values "cloudsql" "enabled") (not (index .Values "anchore-feeds-db" "enabled")) }} + {{- print "localhost" }} + {{- else }} + {{- $db_host := include "postgres.anchore-feeds-db.fullname" . }} + {{- print $db_host -}} + {{- end }} +{{- end }} + +{{/* +Create database hostname string from supplied values file. Used for setting the ANCHORE_DB_HOST env var in the UI & Engine secret. */}} {{- define "db-hostname" }} {{- if and (index .Values "postgresql" "externalEndpoint") (not (index .Values "postgresql" "enabled")) }} {{- print ( index .Values "postgresql" "externalEndpoint" ) }} {{- else if and (index .Values "cloudsql" "enabled") (not (index .Values "postgresql" "enabled")) }} - {{- print "localhost:5432" }} + {{- print "localhost" }} {{- else }} {{- $db_host := include "postgres.fullname" . 
}} - {{- printf "%s:5432" $db_host -}} + {{- print $db_host -}} {{- end }} -{{- end }} \ No newline at end of file +{{- end }} + +{{/* +Allows sourcing of a specified file in the entrypoint of all containers when .Values.anchoreGlobal.doSourceAtEntry.enabled=true +*/}} +{{- define "doSourceFile" }} +{{- if .Values.anchoreGlobal.doSourceAtEntry.enabled }} + {{- if .Values.anchoreGlobal.doSourceAtEntry.filePath }} + {{- fail "As of chart v1.23.1 `.Values.anchoreGlobal.doSourceAtEntry.filePath` is no longer valid. Update your values file to set `.Values.anchoreGlobal.doSourceAtEntry.filePaths` which accepts a list of strings." }} + {{- end }} + {{- range $index, $file := .Values.anchoreGlobal.doSourceAtEntry.filePaths }} + {{- printf "if [ -f %v ];then source %v;fi;" $file $file }} + {{- end }} +{{- end }} +{{- end }} + +{{/* +Upon upgrades, checks if .Values.existingSecret=true and fails the upgrade if .Values.useExistingSecret is not set. +*/}} +{{- define "checkUpgradeForExistingSecret" }} +{{- if and .Release.IsUpgrade .Values.anchoreGlobal.existingSecret (not .Values.anchoreGlobal.useExistingSecrets) }} + {{- fail "As of chart v1.21.0 `.Values.anchoreGlobal.existingSecret` is no longer a valid configuration value. See the chart README for more instructions on configuring existing secrets - https://github.com/anchore/anchore-charts/blob/main/stable/anchore-engine/README.md#chart-version-1210" }} +{{- end }} +{{- end }} + +{{/* +Upon upgrade, check if user is upgrading to chart v1.22.0+ (Enterprise v4.4.0). If they are, ensure that they are +upgrading from Enterprise 4.2.0 or higher and error out if they're upgrading from an older version. 
+*/}} +{{- define "checkUpgradeCompatibility" }} +{{- if and .Release.IsUpgrade (regexMatch "1.22.[0-9]+" .Chart.Version) }} + {{- $apiDeployment := (lookup "apps/v1" "Deployment" .Release.Namespace (include "anchore-engine.api.fullname" .)) }} + {{- if $apiDeployment }} + {{- $apiDeploymentContainers := $apiDeployment.spec.template.spec.containers}} + {{- range $index, $container := $apiDeploymentContainers }} + {{- if eq $container.name "anchore-engine-api" }} + {{- $apiContainerImage := $container.image }} + {{- $installedAnchoreVersion := (regexFind ":v[0-9]+\\.[0-9]+\\.[0-9]+" $apiContainerImage | trimPrefix ":") }} + {{- if $installedAnchoreVersion }} + {{- if not (regexMatch "v4\\.[2-9]\\.[0-9]" ($installedAnchoreVersion | quote)) }} + {{- fail "Anchore Enterprise v4.4.x only supports upgrades from Enterprise v4.2.0 and higher. See release notes for more information - https://docs.anchore.com/current/docs/releasenotes/440/" }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} + +{{/* +Upon upgrade, check if the user uses non-default values for ingress path configurations +*/}} +{{- define "checkIngressValues" -}} +{{- if and .Release.IsUpgrade (or .Values.ingress.feedsPath .Values.ingress.reportsPath .Values.ingress.apiPath) }} +{{- fail "As of chart v1.28.0, the `ingress.feedsPath`, `ingress.reportsPath`, and `ingress.apiPath` values are no longer valid. See README for more information - https://github.com/anchore/anchore-charts/blob/main/stable/anchore-engine/README.md#chart-version-1280" }} +{{- end }} +{{- end -}} + +{{/* +Fail if the enterprise image is v5.0.0 or greater +*/}} +{{- define "checkAnchoreEnterpriseCompatibility" -}} +{{- $imageVersion := (index (splitList ":" .Values.anchoreEnterpriseGlobal.image) 1) -}} +{{- $majorVersion := int (index (splitList "." (trimPrefix "v" $imageVersion)) 0) -}} +{{- if ge $majorVersion 5 -}} + {{- fail "Upgrading to Anchore 5.0.0+ is not supported with the engine chart. 
For information on migrating to the enterprise chart, please refer to https://github.com/anchore/anchore-charts/tree/main/stable/enterprise#migrating-to-the-anchore-enterprise-helm-chart" }} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/stable/anchore-engine/templates/analyzer_configmap.yaml b/stable/anchore-engine/templates/analyzer_configmap.yaml index df8a3575..f75ba47c 100644 --- a/stable/anchore-engine/templates/analyzer_configmap.yaml +++ b/stable/anchore-engine/templates/analyzer_configmap.yaml @@ -16,4 +16,4 @@ data: analyzer_config.yaml: | {{- with .Values.anchoreAnalyzer.configFile }} {{- toYaml . | nindent 4 }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/stable/anchore-engine/templates/analyzer_deployment.yaml b/stable/anchore-engine/templates/analyzer_deployment.yaml index 77ae0c24..2173fecb 100644 --- a/stable/anchore-engine/templates/analyzer_deployment.yaml +++ b/stable/anchore-engine/templates/analyzer_deployment.yaml @@ -51,10 +51,9 @@ spec: {{- with .Values.anchoreAnalyzer.annotations }} {{ toYaml . | nindent 8 }} {{- end }} - {{- if not (or .Values.inject_secrets_via_env .Values.anchoreGlobal.existingSecret) }} + {{- if not (or .Values.inject_secrets_via_env .Values.anchoreGlobal.useExistingSecrets) }} checksum/secrets: {{ include (print $.Template.BasePath "/engine_secret.yaml") . | sha256sum }} {{- end }} - checksum/env: {{ include (print $.Template.BasePath "/engine_configmap_env.yaml") . | sha256sum }} checksum/engine-config: {{ include (print $.Template.BasePath "/engine_configmap.yaml") . | sha256sum }} checksum/analyzer-config: {{ include (print $.Template.BasePath "/analyzer_configmap.yaml") . 
| sha256sum }} spec: @@ -74,7 +73,7 @@ spec: {{- if and .Values.anchoreGlobal.scratchVolume.fixGroupPermissions .Values.anchoreGlobal.securityContext.fsGroup }} initContainers: - name: mode-fixer - image: alpine + image: {{ .Values.anchoreGlobal.scratchVolume.initContainerImage }} securityContext: runAsUser: 0 volumeMounts: @@ -90,8 +89,17 @@ spec: - name: cloudsql-proxy image: {{ .Values.cloudsql.image.repository }}:{{ .Values.cloudsql.image.tag }} imagePullPolicy: {{ .Values.cloudsql.image.pullPolicy }} + {{- with .Values.anchoreGlobal.containerSecurityContext }} + securityContext: + {{ toYaml . | nindent 10 }} + {{- end }} command: ["/cloud_sql_proxy"] args: + {{- if .Values.cloudsql.extraArgs }} + {{- range $arg := .Values.cloudsql.extraArgs }} + - {{ quote $arg }} + {{- end }} + {{- end }} - "-instances={{ .Values.cloudsql.instance }}=tcp:5432" {{- if .Values.cloudsql.useExistingServiceAcc }} - "-credential_file=/var/{{ .Values.cloudsql.serviceAccSecretName }}/{{ .Values.cloudsql.serviceAccJsonName }}" @@ -109,16 +117,23 @@ spec: image: {{ .Values.anchoreGlobal.image }} imagePullPolicy: {{ .Values.anchoreGlobal.imagePullPolicy }} {{- end }} + {{- with .Values.anchoreGlobal.containerSecurityContext }} + securityContext: + {{ toYaml . | nindent 10 }} + {{- end }} + command: ["/bin/sh", "-c"] {{- if .Values.anchoreEnterpriseGlobal.enabled }} - args: ["anchore-enterprise-manager", "service", "start", "--no-auto-upgrade", "analyzer"] + args: + - {{ print (include "doSourceFile" .) }} /docker-entrypoint.sh anchore-enterprise-manager service start --no-auto-upgrade analyzer {{- else }} - args: ["anchore-manager", "service", "start", "--no-auto-upgrade", "analyzer"] + args: + - {{ print (include "doSourceFile" .) 
}} /docker-entrypoint.sh anchore-manager service start --no-auto-upgrade analyzer {{- end }} envFrom: {{- if not .Values.inject_secrets_via_env }} - {{- if .Values.anchoreGlobal.existingSecret }} + {{- if .Values.anchoreGlobal.useExistingSecrets }} - secretRef: - name: {{ .Values.anchoreGlobal.existingSecret }} + name: {{ .Values.anchoreGlobal.existingSecretName }} {{- else }} - secretRef: name: {{ include "anchore-engine.fullname" . }} @@ -126,8 +141,6 @@ spec: name: {{ print (include "anchore-engine.fullname" .) "-admin-pass" }} {{- end }} {{- end }} - - configMapRef: - name: {{ template "anchore-engine.fullname" . }}-env env: {{- with .Values.anchoreGlobal.extraEnv }} {{- toYaml . | nindent 8 }} @@ -139,9 +152,11 @@ spec: valueFrom: fieldRef: fieldPath: metadata.name + ports: - name: analyzer-api containerPort: {{ .Values.anchoreAnalyzer.containerPort }} + volumeMounts: {{- if .Values.anchoreEnterpriseGlobal.enabled }} - name: anchore-license @@ -169,6 +184,9 @@ spec: - name: run mountPath: /var/run/anchore {{- end }} + {{- with .Values.anchoreGlobal.extraVolumeMounts }} + {{- toYaml . | nindent 8 }} + {{- end }} livenessProbe: httpGet: path: /health @@ -230,6 +248,11 @@ spec: secret: secretName: {{ .Values.cloudsql.serviceAccSecretName }} {{- end }} + + {{- with .Values.anchoreGlobal.extraVolumes }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.anchoreAnalyzer.nodeSelector }} nodeSelector: {{ toYaml . | nindent 8 }} @@ -242,6 +265,6 @@ spec: tolerations: {{ toYaml . | nindent 8 }} {{- end }} - {{- with .Values.anchoreGlobal.serviceAccountName }} + {{- with include "anchore-engine.analyzer.serviceAccountName" . }} serviceAccountName: {{ . 
}} {{- end }} diff --git a/stable/anchore-engine/templates/anchore_admin_secret.yaml b/stable/anchore-engine/templates/anchore_admin_secret.yaml index 1a8724b6..4f9e9806 100644 --- a/stable/anchore-engine/templates/anchore_admin_secret.yaml +++ b/stable/anchore-engine/templates/anchore_admin_secret.yaml @@ -1,4 +1,5 @@ -{{- if not .Values.anchoreGlobal.existingSecret }} +{{- template "checkUpgradeForExistingSecret" . }} +{{- if not .Values.anchoreGlobal.useExistingSecrets }} {{- $anchoreAdminPass := (include "anchore-engine.defaultAdminPassword" . | quote) }} diff --git a/stable/anchore-engine/templates/api_deployment.yaml b/stable/anchore-engine/templates/api_deployment.yaml index a63dc265..fa2d8e3e 100644 --- a/stable/anchore-engine/templates/api_deployment.yaml +++ b/stable/anchore-engine/templates/api_deployment.yaml @@ -54,7 +54,6 @@ spec: {{- if not .Values.inject_secrets_via_env }} checksum/secrets: {{ include (print $.Template.BasePath "/engine_secret.yaml") . | sha256sum }} {{- end }} - checksum/env: {{ include (print $.Template.BasePath "/engine_configmap_env.yaml") . | sha256sum }} checksum/engine-config: {{ include (print $.Template.BasePath "/engine_configmap.yaml") . | sha256sum }} {{- if .Values.anchoreGlobal.policyBundles }} checksum/policy-config: {{ include (print $.Template.BasePath "/policy_bundle_configmap.yaml") . | sha256sum }} @@ -78,8 +77,17 @@ spec: - name: cloudsql-proxy image: {{ .Values.cloudsql.image.repository }}:{{ .Values.cloudsql.image.tag }} imagePullPolicy: {{ .Values.cloudsql.image.pullPolicy }} + {{- with .Values.anchoreGlobal.containerSecurityContext }} + securityContext: + {{ toYaml . 
| nindent 10 }} + {{- end }} command: ["/cloud_sql_proxy"] args: + {{- if .Values.cloudsql.extraArgs }} + {{- range $arg := .Values.cloudsql.extraArgs }} + - {{ quote $arg }} + {{- end }} + {{- end }} - "-instances={{ .Values.cloudsql.instance }}=tcp:5432" {{- if .Values.cloudsql.useExistingServiceAcc }} - "-credential_file=/var/{{ .Values.cloudsql.serviceAccSecretName }}/{{ .Values.cloudsql.serviceAccJsonName }}" @@ -97,16 +105,23 @@ spec: image: {{ .Values.anchoreGlobal.image }} imagePullPolicy: {{ .Values.anchoreGlobal.imagePullPolicy }} {{- end }} + {{- with .Values.anchoreGlobal.containerSecurityContext }} + securityContext: + {{ toYaml . | nindent 10 }} + {{- end }} + command: ["/bin/sh", "-c"] {{- if .Values.anchoreEnterpriseGlobal.enabled }} - args: ["anchore-enterprise-manager", "service", "start", "--no-auto-upgrade", "apiext"] + args: + - {{ print (include "doSourceFile" .) }} /docker-entrypoint.sh anchore-enterprise-manager service start --no-auto-upgrade apiext {{- else }} - args: ["anchore-manager", "service", "start", "--no-auto-upgrade", "apiext"] + args: + - {{ print (include "doSourceFile" .) }} /docker-entrypoint.sh anchore-manager service start --no-auto-upgrade apiext {{- end }} envFrom: {{- if not .Values.inject_secrets_via_env }} - {{- if .Values.anchoreGlobal.existingSecret }} + {{- if .Values.anchoreGlobal.useExistingSecrets }} - secretRef: - name: {{ .Values.anchoreGlobal.existingSecret }} + name: {{ .Values.anchoreGlobal.existingSecretName }} {{- else }} - secretRef: name: {{ include "anchore-engine.fullname" . }} @@ -114,8 +129,6 @@ spec: name: {{ print (include "anchore-engine.fullname" .) "-admin-pass" }} {{- end }} {{- end }} - - configMapRef: - name: {{ template "anchore-engine.fullname" . }}-env env: {{- with .Values.anchoreGlobal.extraEnv }} {{- toYaml . | nindent 8 }} @@ -131,7 +144,7 @@ spec: - name: ANCHORE_CLI_PASS valueFrom: secretKeyRef: - name: {{ default (print (include "anchore-engine.fullname" .) 
"-admin-pass") .Values.anchoreGlobal.existingSecret }} + name: {{ ternary .Values.anchoreGlobal.existingSecretName (print (include "anchore-engine.fullname" .) "-admin-pass") .Values.anchoreGlobal.useExistingSecrets }} key: ANCHORE_ADMIN_PASSWORD {{- end }} ports: @@ -166,6 +179,9 @@ spec: mountPath: /home/anchore/certs/ readOnly: true {{- end }} + {{- with .Values.anchoreGlobal.extraVolumeMounts }} + {{- toYaml . | nindent 8 }} + {{- end }} livenessProbe: httpGet: path: /health @@ -195,12 +211,18 @@ spec: - name: {{ .Chart.Name }}-rbac-authorizer image: {{ .Values.anchoreEnterpriseGlobal.image }} imagePullPolicy: {{ .Values.anchoreEnterpriseGlobal.imagePullPolicy }} - args: ["anchore-enterprise-manager", "service", "start", "--no-auto-upgrade", "rbac_authorizer"] + {{- with .Values.anchoreGlobal.containerSecurityContext }} + securityContext: + {{ toYaml . | nindent 10 }} + {{- end }} + command: ["/bin/sh", "-c"] + args: + - {{ print (include "doSourceFile" .) }} /docker-entrypoint.sh anchore-enterprise-manager service start --no-auto-upgrade rbac_authorizer envFrom: {{- if not .Values.inject_secrets_via_env }} - {{- if .Values.anchoreGlobal.existingSecret }} + {{- if .Values.anchoreGlobal.useExistingSecrets }} - secretRef: - name: {{ .Values.anchoreGlobal.existingSecret }} + name: {{ .Values.anchoreGlobal.existingSecretName }} {{- else }} - secretRef: name: {{ include "anchore-engine.fullname" . }} @@ -208,8 +230,6 @@ spec: name: {{ print (include "anchore-engine.fullname" .) "-admin-pass" }} {{- end }} {{- end }} - - configMapRef: - name: {{ template "anchore-engine.fullname" . }}-env env: {{- with .Values.anchoreGlobal.extraEnv }} {{- toYaml . | nindent 8 }} @@ -244,6 +264,10 @@ spec: - name: run mountPath: /var/run/anchore {{- end }} + + {{- with .Values.anchoreGlobal.extraVolumeMounts }} + {{- toYaml . 
| nindent 8 }} + {{- end }} livenessProbe: exec: command: @@ -272,15 +296,21 @@ spec: - name: "{{ .Chart.Name }}-reports-api" image: {{ .Values.anchoreEnterpriseGlobal.image }} imagePullPolicy: {{ .Values.anchoreEnterpriseGlobal.imagePullPolicy }} - args: ["anchore-enterprise-manager", "service", "start", "--no-auto-upgrade", "reports"] + {{- with .Values.anchoreGlobal.containerSecurityContext }} + securityContext: + {{ toYaml . | nindent 10 }} + {{- end }} + command: ["/bin/sh", "-c"] + args: + - {{ print (include "doSourceFile" .) }} /docker-entrypoint.sh anchore-enterprise-manager service start --no-auto-upgrade reports ports: - containerPort: {{ .Values.anchoreEnterpriseReports.service.apiPort }} name: reports-api envFrom: {{- if not .Values.inject_secrets_via_env }} - {{- if .Values.anchoreGlobal.existingSecret }} + {{- if .Values.anchoreGlobal.useExistingSecrets }} - secretRef: - name: {{ .Values.anchoreGlobal.existingSecret }} + name: {{ .Values.anchoreGlobal.existingSecretName }} {{- else }} - secretRef: name: {{ include "anchore-engine.fullname" . }} @@ -288,8 +318,6 @@ spec: name: {{ print (include "anchore-engine.fullname" .) "-admin-pass" }} {{- end }} {{- end }} - - configMapRef: - name: {{ template "anchore-engine.fullname" . }}-env env: {{- with .Values.anchoreGlobal.extraEnv }} {{- toYaml . | nindent 8 }} @@ -321,6 +349,9 @@ spec: - name: run mountPath: /var/run/anchore {{- end }} + {{- with .Values.anchoreGlobal.extraVolumeMounts }} + {{- toYaml . | nindent 8 }} + {{- end }} livenessProbe: httpGet: path: /health @@ -382,6 +413,9 @@ spec: secret: secretName: {{ .Values.cloudsql.serviceAccSecretName }} {{- end }} + {{- with .Values.anchoreGlobal.extraVolumes }} + {{- toYaml . | nindent 8 }} + {{- end }} {{- with .Values.anchoreApi.nodeSelector }} nodeSelector: {{ toYaml . | nindent 8 }} @@ -394,7 +428,7 @@ spec: tolerations: {{ toYaml . 
| nindent 8 }} {{- end }} - {{- with .Values.anchoreGlobal.serviceAccountName }} + {{- with include "anchore-engine.api.serviceAccountName" . }} serviceAccountName: {{ . }} {{- end }} @@ -402,7 +436,7 @@ spec: apiVersion: v1 kind: Service metadata: - name: {{ template "anchore-engine.api.fullname" . }} + name: {{ template "anchore-engine.api.serviceName" . }} labels: app: {{ template "anchore-engine.fullname" . }} chart: {{ .Chart.Name }}-{{ .Chart.Version }} @@ -426,11 +460,17 @@ spec: port: {{ .Values.anchoreApi.service.port }} targetPort: {{ .Values.anchoreApi.service.port }} protocol: TCP + {{- with .Values.anchoreApi.service.nodePort }} + nodePort: {{ . }} + {{- end }} {{- if and .Values.anchoreEnterpriseGlobal.enabled .Values.anchoreEnterpriseReports.enabled }} - name: reports-api port: {{ .Values.anchoreEnterpriseReports.service.apiPort }} targetPort: {{ .Values.anchoreEnterpriseReports.service.apiPort }} protocol: TCP + {{- with .Values.anchoreEnterpriseReports.service.apiNodePort }} + nodePort: {{ . }} + {{- end }} {{- end }} selector: app: {{ template "anchore-engine.fullname" . }} diff --git a/stable/anchore-engine/templates/catalog_deployment.yaml b/stable/anchore-engine/templates/catalog_deployment.yaml index 6901fa9a..b12ffc17 100644 --- a/stable/anchore-engine/templates/catalog_deployment.yaml +++ b/stable/anchore-engine/templates/catalog_deployment.yaml @@ -54,7 +54,6 @@ spec: {{- if not .Values.inject_secrets_via_env }} checksum/secrets: {{ include (print $.Template.BasePath "/engine_secret.yaml") . | sha256sum }} {{- end }} - checksum/env: {{ include (print $.Template.BasePath "/engine_configmap_env.yaml") . | sha256sum }} checksum/engine-config: {{ include (print $.Template.BasePath "/engine_configmap.yaml") . | sha256sum }} {{- if .Values.anchoreGlobal.policyBundles }} checksum/policy-config: {{ include (print $.Template.BasePath "/policy_bundle_configmap.yaml") . | sha256sum }} @@ -72,14 +71,37 @@ spec: imagePullSecrets: - name: {{ . 
}} {{- end }} + {{- end }} + {{- if and .Values.anchoreGlobal.scratchVolume.fixGroupPermissions .Values.anchoreGlobal.securityContext.fsGroup }} + initContainers: + - name: mode-fixer + image: {{ .Values.anchoreGlobal.scratchVolume.initContainerImage }} + securityContext: + runAsUser: 0 + volumeMounts: + - name: {{ $component }}-scratch + mountPath: {{ .Values.anchoreGlobal.scratchVolume.mountPath }} + command: + - sh + - -c + - (chmod 0775 {{ .Values.anchoreGlobal.scratchVolume.mountPath }}; chgrp {{ .Values.anchoreGlobal.securityContext.fsGroup }} {{ .Values.anchoreGlobal.scratchVolume.mountPath }} ) {{- end }} containers: {{- if .Values.cloudsql.enabled }} - name: cloudsql-proxy image: {{ .Values.cloudsql.image.repository }}:{{ .Values.cloudsql.image.tag }} imagePullPolicy: {{ .Values.cloudsql.image.pullPolicy }} + {{- with .Values.anchoreGlobal.containerSecurityContext }} + securityContext: + {{ toYaml . | nindent 10 }} + {{- end }} command: ["/cloud_sql_proxy"] args: + {{- if .Values.cloudsql.extraArgs }} + {{- range $arg := .Values.cloudsql.extraArgs }} + - {{ quote $arg }} + {{- end }} + {{- end }} - "-instances={{ .Values.cloudsql.instance }}=tcp:5432" {{- if .Values.cloudsql.useExistingServiceAcc }} - "-credential_file=/var/{{ .Values.cloudsql.serviceAccSecretName }}/{{ .Values.cloudsql.serviceAccJsonName }}" @@ -97,16 +119,23 @@ spec: image: {{ .Values.anchoreGlobal.image }} imagePullPolicy: {{ .Values.anchoreGlobal.imagePullPolicy }} {{- end }} + {{- with .Values.anchoreGlobal.containerSecurityContext }} + securityContext: + {{ toYaml . | nindent 10 }} + {{- end }} + command: ["/bin/sh", "-c"] {{- if .Values.anchoreEnterpriseGlobal.enabled }} - args: ["anchore-enterprise-manager", "service", "start", "--no-auto-upgrade", "catalog"] + args: + - {{ print (include "doSourceFile" .) 
}} /docker-entrypoint.sh anchore-enterprise-manager service start --no-auto-upgrade catalog {{- else }} - args: ["anchore-manager", "service", "start", "--no-auto-upgrade", "catalog"] + args: + - {{ print (include "doSourceFile" .) }} /docker-entrypoint.sh anchore-manager service start --no-auto-upgrade catalog {{- end }} envFrom: {{- if not .Values.inject_secrets_via_env }} - {{- if .Values.anchoreGlobal.existingSecret }} + {{- if .Values.anchoreGlobal.useExistingSecrets }} - secretRef: - name: {{ .Values.anchoreGlobal.existingSecret }} + name: {{ .Values.anchoreGlobal.existingSecretName }} {{- else }} - secretRef: name: {{ include "anchore-engine.fullname" . }} @@ -114,8 +143,6 @@ spec: name: {{ print (include "anchore-engine.fullname" .) "-admin-pass" }} {{- end }} {{- end }} - - configMapRef: - name: {{ template "anchore-engine.fullname" . }}-env env: {{- with .Values.anchoreGlobal.extraEnv }} {{- toYaml . | nindent 8 }} @@ -139,6 +166,8 @@ spec: - name: config-volume mountPath: /config/config.yaml subPath: config.yaml + - name: {{ $component }}-scratch + mountPath: {{ .Values.anchoreGlobal.scratchVolume.mountPath }} {{- if .Values.anchoreGlobal.policyBundles }} {{- range $key, $value := .Values.anchoreGlobal.policyBundles }} - name: policy-bundle-volume @@ -159,6 +188,9 @@ spec: mountPath: /home/anchore/certs/ readOnly: true {{- end }} + {{- with .Values.anchoreGlobal.extraVolumeMounts }} + {{- toYaml . | nindent 8 }} + {{- end }} livenessProbe: httpGet: path: /health @@ -193,6 +225,12 @@ spec: - name: config-volume configMap: name: {{ template "anchore-engine.fullname" . 
}} + - name: {{ $component }}-scratch + {{- if .Values.anchoreGlobal.scratchVolume.details }} + {{- toYaml .Values.anchoreGlobal.scratchVolume.details | nindent 10 }} + {{- else }} + emptyDir: {} + {{- end }} {{- if .Values.anchoreGlobal.policyBundles }} - name: policy-bundle-volume configMap: @@ -216,6 +254,9 @@ spec: secret: secretName: {{ .Values.cloudsql.serviceAccSecretName }} {{- end }} + {{- with .Values.anchoreGlobal.extraVolumes }} + {{- toYaml . | nindent 8 }} + {{- end }} {{- with .Values.anchoreCatalog.nodeSelector }} nodeSelector: {{ toYaml . | nindent 8 }} @@ -228,17 +269,20 @@ spec: tolerations: {{ toYaml . | nindent 8 }} {{- end }} - {{- if .Values.anchoreGlobal.serviceAccountName }} - serviceAccountName: {{ .Values.anchoreGlobal.serviceAccountName }} - {{- else if and .Values.anchoreEnterpriseGlobal.enabled .Values.anchoreCatalog.createServiceAccount .Values.anchoreCatalog.runtimeInventory.reportAnchoreCluster.enabled }} - serviceAccountName: {{ template "anchore-engine.catalog.fullname" . }} + {{- if or .Values.anchoreCatalog.serviceAccountName .Values.anchoreGlobal.serviceAccountName }} + serviceAccountName: {{ include "anchore-engine.catalog.serviceAccountName" . }} + {{- else if .Release.IsUpgrade }} + # Including serviceAccount to remove existing service accounts from all deployments + # see - https://github.com/kubernetes/kubernetes/issues/76367 + serviceAccount: "" + serviceAccountName: "" {{- end }} --- apiVersion: v1 kind: Service metadata: - name: {{ template "anchore-engine.catalog.fullname" . }} + name: {{ template "anchore-engine.catalog.serviceName" . }} labels: app: {{ template "anchore-engine.fullname" . }} chart: {{ .Chart.Name }}-{{ .Chart.Version }} @@ -262,6 +306,9 @@ spec: port: {{ .Values.anchoreCatalog.service.port }} targetPort: {{ .Values.anchoreCatalog.service.port }} protocol: TCP + {{- with .Values.anchoreCatalog.service.nodePort }} + nodePort: {{ . }} + {{- end }} selector: app: {{ template "anchore-engine.fullname" . 
}} component: {{ $component }} diff --git a/stable/anchore-engine/templates/catalog_rbac.yaml b/stable/anchore-engine/templates/catalog_rbac.yaml deleted file mode 100644 index 6898a1d8..00000000 --- a/stable/anchore-engine/templates/catalog_rbac.yaml +++ /dev/null @@ -1,64 +0,0 @@ -{{- if and .Values.anchoreEnterpriseGlobal.enabled .Values.anchoreCatalog.createServiceAccount .Values.anchoreCatalog.runtimeInventory.reportAnchoreCluster.enabled }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ template "anchore-engine.catalog.fullname" . }} - labels: - {{- with .Values.anchoreCatalog.labels }} - {{ toYaml . | nindent 4 }} - {{- end }} - {{- with .Values.anchoreGlobal.labels }} - {{ toYaml . | nindent 4 }} - {{- end }} - {{- with .Values.anchoreCatalog.annotations }} - annotations: - {{ toYaml . | nindent 4 }} - {{- end }} - ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: {{ template "anchore-engine.catalog.fullname" . }} - labels: - {{- with .Values.anchoreCatalog.labels }} - {{ toYaml . | nindent 4 }} - {{- end }} - {{- with .Values.anchoreGlobal.labels }} - {{ toYaml . | nindent 4 }} - {{- end }} - {{- with .Values.anchoreCatalog.annotations }} - annotations: - {{ toYaml . | nindent 4 }} - {{- end }} -rules: -- apiGroups: [""] - resources: ["pods","namespaces"] - verbs: ["get", "watch", "list"] - ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: {{ template "anchore-engine.catalog.fullname" . }} - labels: - {{- with .Values.anchoreCatalog.labels }} - {{ toYaml . | nindent 4 }} - {{- end }} - {{- with .Values.anchoreGlobal.labels }} - {{ toYaml . | nindent 4 }} - {{- end }} - {{- with .Values.anchoreCatalog.annotations }} - annotations: - {{ toYaml . | nindent 4 }} - {{- end }} -subjects: - - kind: ServiceAccount - name: {{ template "anchore-engine.catalog.fullname" . 
}} - namespace: {{ .Release.Namespace }} -roleRef: - kind: ClusterRole - name: {{ template "anchore-engine.catalog.fullname" . }} - apiGroup: rbac.authorization.k8s.io -{{- end }} \ No newline at end of file diff --git a/stable/anchore-engine/templates/engine_configmap.yaml b/stable/anchore-engine/templates/engine_configmap.yaml index 578d8aa4..48340a1c 100644 --- a/stable/anchore-engine/templates/engine_configmap.yaml +++ b/stable/anchore-engine/templates/engine_configmap.yaml @@ -1,5 +1,5 @@ {{- $anchoreFeedsURL := "https://ancho.re/v1/service/feeds" -}} -{{- $grypeProviderFeedsExternalURL := "https://toolbox-data.anchore.io/grype/databases/listing.json" -}} +{{- $grypeProviderFeedsExternalURL := "" -}} {{- if .Values.anchoreEnterpriseFeeds.url -}} {{- $urlPathSuffix := (default "" (regexFind "/v1.*$" .Values.anchoreEnterpriseFeeds.url)) -}} @@ -8,17 +8,17 @@ {{- $grypeProviderFeedsExternalURL = (printf "%s/v1/databases/grypedb" $anchoreFeedsHost) -}} {{- else if and .Values.anchoreEnterpriseGlobal.enabled .Values.anchoreEnterpriseFeeds.enabled -}} {{- if .Values.anchoreGlobal.internalServicesSsl.enabled -}} - {{- $anchoreFeedsURL = (printf "https://%s:%s/v1/feeds" (include "anchore-engine.enterprise-feeds.fullname" .) (.Values.anchoreEnterpriseFeeds.service.port | toString) ) -}} - {{- $grypeProviderFeedsExternalURL = (printf "https://%s:%s/v1/databases/grypedb" (include "anchore-engine.enterprise-feeds.fullname" .) (.Values.anchoreEnterpriseFeeds.service.port | toString) ) -}} + {{- $anchoreFeedsURL = (printf "https://%s:%s/v1/feeds" (include "anchore-engine.enterprise-feeds.serviceName" .) (.Values.anchoreEnterpriseFeeds.service.port | toString) ) -}} + {{- $grypeProviderFeedsExternalURL = (printf "https://%s:%s/v1/databases/grypedb" (include "anchore-engine.enterprise-feeds.serviceName" .) 
(.Values.anchoreEnterpriseFeeds.service.port | toString) ) -}} {{- else -}} - {{- $anchoreFeedsURL = (printf "http://%s:%s/v1/feeds" (include "anchore-engine.enterprise-feeds.fullname" .) (.Values.anchoreEnterpriseFeeds.service.port | toString) ) -}} - {{- $grypeProviderFeedsExternalURL = (printf "http://%s:%s/v1/databases/grypedb" (include "anchore-engine.enterprise-feeds.fullname" .) (.Values.anchoreEnterpriseFeeds.service.port | toString) ) -}} + {{- $anchoreFeedsURL = (printf "http://%s:%s/v1/feeds" (include "anchore-engine.enterprise-feeds.serviceName" .) (.Values.anchoreEnterpriseFeeds.service.port | toString) ) -}} + {{- $grypeProviderFeedsExternalURL = (printf "http://%s:%s/v1/databases/grypedb" (include "anchore-engine.enterprise-feeds.serviceName" .) (.Values.anchoreEnterpriseFeeds.service.port | toString) ) -}} {{- end -}} {{- end -}} -{{- $vulnerabilityProvider := .Values.anchorePolicyEngine.vulnerabilityProvider -}} -{{- if and .Values.anchoreEnterpriseGlobal.enabled (ne $vulnerabilityProvider "grype") -}} - {{ fail "The v2 (grype) vulnerability scanner is the only valid configuration for chart v1.18.0 and higher. Please set the following: `.Values.anchorePolicyEngine.vulnerabilityProvider=grype`." }} +{{- /* Set the grypeProviderFeedsExternalURL to upstream feeds if still unset or if specifically overridden */}} +{{- if or (empty $grypeProviderFeedsExternalURL) .Values.anchorePolicyEngine.overrideFeedsToUpstream -}} + {{- $grypeProviderFeedsExternalURL = "https://toolbox-data.anchore.io/grype/databases/listing.json" -}} {{- end -}} kind: ConfigMap @@ -67,6 +67,8 @@ data: # Defines a maximum compressed image size (MB) to be added for analysis # Value < 0 disables feature. 
Disabled by default max_compressed_image_size_mb: {{ default -1 .Values.anchoreGlobal.maxCompressedImageSizeMB }} + max_source_import_size_mb: {{ default 100 .Values.anchoreGlobal.maxSourceImportSizeMB }} + max_import_content_size_mb: {{ default 100 .Values.anchoreGlobal.maxImportContentSizeMB }} # Locations for keys used for signing and encryption. Only one of 'secret' or 'public_key_path'/'private_key_path' needs to be set. If all are set then the keys take precedence over the secret value # Secret is for a shared secret and if set, all components in anchore should have the exact same value in their configs. @@ -86,24 +88,40 @@ data: oauth: enabled: {{ .Values.anchoreGlobal.oauthEnabled }} default_token_expiration_seconds: {{ .Values.anchoreGlobal.oauthTokenExpirationSeconds }} + refresh_token_expiration_seconds: {{ .Values.anchoreGlobal.oauthRefreshTokenExpirationSeconds }} # Set this to True to enable storing user passwords only as secure hashes in the db. This can dramatically increase CPU usage if you # don't also use oauth and tokens for internal communications (which requires keys/secret to be configured as well) # WARNING: you should not change this after a system has been initialized as it may cause a mismatch in existing passwords hashed_passwords: {{ .Values.anchoreGlobal.hashedPasswords }} + # Set this to True in order to disable the SSO JIT provisioning during authentication. This provides an additional + # layer of security and configuration for SSO users to gain access to Anchore. This is disabled by default. 
+ sso_require_existing_users: {{ .Values.anchoreGlobal.ssoRequireExistingUsers }} + credentials: database: - {{- if not .Values.anchoreGlobal.dbConfig.ssl }} - db_connect: "postgresql://${ANCHORE_DB_USER}:${ANCHORE_DB_PASSWORD}@${ANCHORE_DB_HOST}/${ANCHORE_DB_NAME}" - {{- else if eq .Values.anchoreGlobal.dbConfig.sslMode "require" }} - db_connect: "postgresql://${ANCHORE_DB_USER}:${ANCHORE_DB_PASSWORD}@${ANCHORE_DB_HOST}/${ANCHORE_DB_NAME}?sslmode={{- .Values.anchoreGlobal.dbConfig.sslMode -}}" - {{- else }} - db_connect: "postgresql://${ANCHORE_DB_USER}:${ANCHORE_DB_PASSWORD}@${ANCHORE_DB_HOST}/${ANCHORE_DB_NAME}?sslmode={{- .Values.anchoreGlobal.dbConfig.sslMode -}}&sslrootcert=/home/anchore/certs/{{- .Values.anchoreGlobal.dbConfig.sslRootCertName -}}" - {{- end }} + {{- if .Values.anchoreEnterpriseGlobal.enabled }} + user: "${ANCHORE_DB_USER}" + password: "${ANCHORE_DB_PASSWORD}" + host: "${ANCHORE_DB_HOST}" + port: "${ANCHORE_DB_PORT}" + name: "${ANCHORE_DB_NAME}" + {{- else }} + db_connect: "postgresql://${ANCHORE_DB_USER}:${ANCHORE_DB_PASSWORD}@${ANCHORE_DB_HOST}:${ANCHORE_DB_PORT}/${ANCHORE_DB_NAME}" + {{- end }} + db_connect_args: timeout: {{ .Values.anchoreGlobal.dbConfig.timeout }} + {{- if .Values.anchoreGlobal.dbConfig.ssl }} + ssl: true + sslmode: {{ .Values.anchoreGlobal.dbConfig.sslMode -}} + {{- if not (eq .Values.anchoreGlobal.dbConfig.sslMode "require") }} + sslrootcert: /home/anchore/certs/{{- .Values.anchoreGlobal.dbConfig.sslRootCertName -}} + {{- end }} + {{- else }} ssl: false + {{- end }} db_pool_size: {{ .Values.anchoreGlobal.dbConfig.connectionPoolSize }} db_pool_max_overflow: {{ .Values.anchoreGlobal.dbConfig.connectionPoolMaxOverflow }} {{- with .Values.anchoreGlobal.dbConfig.engineArgs }} @@ -114,7 +132,7 @@ data: apiext: enabled: true require_auth: true - endpoint_hostname: {{ template "anchore-engine.api.fullname" . }} + endpoint_hostname: {{ template "anchore-engine.api.serviceName" . 
}} max_request_threads: {{ default 50 .Values.anchoreApi.maxRequestThreads }} listen: 0.0.0.0 port: {{ .Values.anchoreApi.service.port }} @@ -162,10 +180,11 @@ data: ssl_key: "/home/anchore/certs/{{- .Values.anchoreGlobal.internalServicesSsl.certSecretKeyName }}" {{- end }} enable_owned_package_filtering: {{ .Values.anchoreAnalyzer.enableOwnedPackageFiltering }} + keep_image_analysis_tmpfiles: {{ default false .Values.anchoreAnalyzer.keepImageAnalysisTmpfiles }} catalog: enabled: true require_auth: true - endpoint_hostname: {{ template "anchore-engine.catalog.fullname" . }} + endpoint_hostname: {{ template "anchore-engine.catalog.serviceName" . }} listen: 0.0.0.0 port: {{ .Values.anchoreCatalog.service.port }} max_request_threads: {{ default 50 .Values.anchoreCatalog.maxRequestThreads }} @@ -192,7 +211,6 @@ data: service_watcher: {{ .Values.anchoreCatalog.cycleTimers.service_watcher }} # Interval between checks to repo for new tags repo_watcher: {{ .Values.anchoreCatalog.cycleTimers.repo_watcher }} - k8s_watcher: {{ .Values.anchoreCatalog.cycleTimers.k8s_watcher }} k8s_image_watcher: {{ .Values.anchoreCatalog.cycleTimers.k8s_image_watcher }} resource_metrics: {{ .Values.anchoreCatalog.cycleTimers.resource_metrics }} events_gc: {{ .Values.anchoreCatalog.cycleTimers.events_gc }} @@ -215,16 +233,12 @@ data: {{- end }} runtime_inventory: image_ttl_days: {{ .Values.anchoreCatalog.runtimeInventory.imageTTLDays }} - kubernetes: - report_anchore_cluster: - enabled: {{ .Values.anchoreCatalog.runtimeInventory.reportAnchoreCluster.enabled }} - anchore_cluster_name: {{ .Values.anchoreCatalog.runtimeInventory.reportAnchoreCluster.clusterName }} - namespaces: - {{- toYaml .Values.anchoreCatalog.runtimeInventory.reportAnchoreCluster.namespaces | nindent 16 }} + image_ingest_overwrite: {{ default false .Values.anchoreCatalog.runtimeInventory.imageIngestOverwrite }} + down_analyzer_task_requeue: {{ .Values.anchoreCatalog.downAnalyzerTaskRequeue }} simplequeue: enabled: true 
require_auth: true - endpoint_hostname: {{ template "anchore-engine.simplequeue.fullname" . }} + endpoint_hostname: {{ template "anchore-engine.simplequeue.serviceName" . }} listen: 0.0.0.0 port: {{ .Values.anchoreSimpleQueue.service.port }} max_request_threads: {{ default 50 .Values.anchoreSimpleQueue.maxRequestThreads }} @@ -237,15 +251,17 @@ data: enabled: true require_auth: true max_request_threads: {{ default 50 .Values.anchorePolicyEngine.maxRequestThreads }} - endpoint_hostname: {{ template "anchore-engine.policy-engine.fullname" . }} + endpoint_hostname: {{ template "anchore-engine.policy-engine.serviceName" . }} listen: 0.0.0.0 port: {{ .Values.anchorePolicyEngine.service.port }} policy_evaluation_cache_ttl: {{ .Values.anchorePolicyEngine.cacheTTL }} cycle_timer_seconds: 1 cycle_timers: {{- toYaml .Values.anchorePolicyEngine.cycleTimers | nindent 10 }} + enable_package_db_load: {{ .Values.anchorePolicyEngine.enablePackageDbLoad }} + enable_images_by_vulnerability_api: {{ .Values.anchorePolicyEngine.enableImagesByVulnerabilityAPI }} vulnerabilities: - provider: {{ $vulnerabilityProvider }} + provider: grype sync: enabled: true ssl_verify: {{ .Values.anchoreGlobal.internalServicesSsl.verifyCerts }} @@ -282,15 +298,6 @@ data: {{- end }} url: {{ $anchoreFeedsURL }} {{- if and .Values.anchoreEnterpriseGlobal.enabled .Values.anchoreEnterpriseFeeds.enabled }} - vulndb: - enabled: {{ default "true" (.Values.anchoreEnterpriseFeeds.vulndbDriverEnabled | quote) }} - url: {{ $anchoreFeedsURL }} - {{- else if and .Values.anchoreEnterpriseGlobal.enabled .Values.anchoreGlobal.syncVulnDB }} - vulndb: - enabled: true - url: {{ $anchoreFeedsURL }} - {{- end }} - {{- if and .Values.anchoreEnterpriseGlobal.enabled .Values.anchoreEnterpriseFeeds.enabled }} microsoft: enabled: {{ .Values.anchoreEnterpriseFeeds.msrcDriverEnabled }} url: {{ $anchoreFeedsURL }} @@ -303,4 +310,4 @@ data: ssl_enable: {{ .Values.anchoreGlobal.internalServicesSsl.enabled }} ssl_cert: 
"/home/anchore/certs/{{- .Values.anchoreGlobal.internalServicesSsl.certSecretCertName }}" ssl_key: "/home/anchore/certs/{{- .Values.anchoreGlobal.internalServicesSsl.certSecretKeyName }}" - {{- end }} \ No newline at end of file + {{- end }} diff --git a/stable/anchore-engine/templates/engine_configmap_env.yaml b/stable/anchore-engine/templates/engine_configmap_env.yaml deleted file mode 100644 index eff162dd..00000000 --- a/stable/anchore-engine/templates/engine_configmap_env.yaml +++ /dev/null @@ -1,22 +0,0 @@ -kind: ConfigMap -apiVersion: v1 -metadata: - name: {{ template "anchore-engine.fullname" . }}-env - labels: - app: {{ template "anchore-engine.fullname" . }} - chart: {{ .Chart.Name }}-{{ .Chart.Version }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} - {{- with .Values.anchoreGlobal.labels }} - {{ toYaml . | nindent 4 }} - {{- end }} -data: - ANCHORE_DB_NAME: {{ index .Values "postgresql" "postgresDatabase" | quote }} - ANCHORE_DB_USER: {{ index .Values "postgresql" "postgresUser" | quote }} - {{- if and (index .Values "postgresql" "externalEndpoint") (not (index .Values "postgresql" "enabled")) }} - ANCHORE_DB_HOST: {{ index .Values "postgresql" "externalEndpoint" | quote }} - {{- else if and (index .Values "cloudsql" "enabled") (not (index .Values "postgresql" "enabled")) }} - ANCHORE_DB_HOST: "localhost:5432" - {{- else }} - ANCHORE_DB_HOST: "{{ template "postgres.fullname" . }}:5432" - {{- end }} diff --git a/stable/anchore-engine/templates/engine_secret.yaml b/stable/anchore-engine/templates/engine_secret.yaml index e23cc638..95714222 100644 --- a/stable/anchore-engine/templates/engine_secret.yaml +++ b/stable/anchore-engine/templates/engine_secret.yaml @@ -1,4 +1,5 @@ -{{- if not .Values.anchoreGlobal.existingSecret }} +{{- template "checkUpgradeForExistingSecret" . 
}} +{{- if not .Values.anchoreGlobal.useExistingSecrets }} apiVersion: v1 kind: Secret metadata: @@ -13,7 +14,11 @@ metadata: {{- end }} type: Opaque stringData: + ANCHORE_DB_NAME: {{ index .Values "postgresql" "postgresDatabase" | quote }} + ANCHORE_DB_USER: {{ index .Values "postgresql" "postgresUser" | quote }} ANCHORE_DB_PASSWORD: {{ index .Values "postgresql" "postgresPassword" | quote }} + ANCHORE_DB_HOST: "{{ template "db-hostname" . }}" + ANCHORE_DB_PORT: {{ index .Values "postgresql" "postgresPort" | quote }} {{- with .Values.anchoreGlobal.saml.secret }} ANCHORE_SAML_SECRET: {{ . }} {{- end }} diff --git a/stable/anchore-engine/templates/engine_upgrade_job.yaml b/stable/anchore-engine/templates/engine_upgrade_job.yaml index 1c97fae2..c0b7bd0d 100644 --- a/stable/anchore-engine/templates/engine_upgrade_job.yaml +++ b/stable/anchore-engine/templates/engine_upgrade_job.yaml @@ -1,4 +1,5 @@ -{{- if .Values.anchoreEngineUpgradeJob.enabled }} +{{- template "checkUpgradeCompatibility" . }} +{{- if and .Values.anchoreEngineUpgradeJob.enabled (not .Values.anchoreGlobal.usePreupgradeHook) }} apiVersion: batch/v1 kind: Job metadata: @@ -8,6 +9,12 @@ metadata: app.kubernetes.io/instance: {{ .Release.Name | quote }} app.kubernetes.io/version: {{ .Chart.AppVersion }} helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + {{- with .Values.anchoreEngineUpgradeJob.labels }} + {{ toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.anchoreGlobal.labels }} + {{ toYaml . | nindent 4 }} + {{- end }} annotations: "helm.sh/hook": post-upgrade "helm.sh/hook-weight": "-5" @@ -19,6 +26,14 @@ spec: app.kubernetes.io/managed-by: {{ .Release.Service | quote }} app.kubernetes.io/instance: {{ .Release.Name | quote }} helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app: {{ template "anchore-engine.fullname" . }} + component: anchore-engine-upgrade + {{- with .Values.anchoreEngineUpgradeJob.labels }} + {{ toYaml . 
| nindent 8 }} + {{- end }} + {{- with .Values.anchoreGlobal.labels }} + {{ toYaml . | nindent 8 }} + {{- end }} annotations: {{- with .Values.anchoreGlobal.annotations }} {{ toYaml . | nindent 8 }} @@ -49,8 +64,17 @@ spec: - name: cloudsql-proxy image: {{ .Values.cloudsql.image.repository }}:{{ .Values.cloudsql.image.tag }} imagePullPolicy: {{ .Values.cloudsql.image.pullPolicy }} + {{- with .Values.anchoreGlobal.containerSecurityContext }} + securityContext: + {{ toYaml . | nindent 10 }} + {{- end }} command: ["/cloud_sql_proxy"] args: + {{- if .Values.cloudsql.extraArgs }} + {{- range $arg := .Values.cloudsql.extraArgs }} + - {{ quote $arg }} + {{- end }} + {{- end }} - "-instances={{ .Values.cloudsql.instance }}=tcp:5432" {{- if .Values.cloudsql.useExistingServiceAcc }} - "-credential_file=/var/{{ .Values.cloudsql.serviceAccSecretName }}/{{ .Values.cloudsql.serviceAccJsonName }}" @@ -72,26 +96,34 @@ spec: args: {{- if not .Values.anchoreGlobal.dbConfig.ssl }} - | - anchore-manager db --db-connect postgresql://"${ANCHORE_DB_USER}":"${ANCHORE_DB_PASSWORD}"@"${ANCHORE_DB_HOST}"/"${ANCHORE_DB_NAME}" upgrade --dontask; + {{ print (include "doSourceFile" .) }} anchore-manager db --db-connect postgresql://"${ANCHORE_DB_USER}":"${ANCHORE_DB_PASSWORD}"@"${ANCHORE_DB_HOST/:5432}:${ANCHORE_DB_PORT:-5432}"/"${ANCHORE_DB_NAME}" upgrade --dontask; {{- else if eq .Values.anchoreGlobal.dbConfig.sslMode "require"}} - | - anchore-manager db --db-use-ssl --db-connect postgresql://"${ANCHORE_DB_USER}":"${ANCHORE_DB_PASSWORD}"@"${ANCHORE_DB_HOST}"/"${ANCHORE_DB_NAME}"?sslmode={{- .Values.anchoreGlobal.dbConfig.sslMode }} upgrade --dontask; + {{ print (include "doSourceFile" .) 
}} anchore-manager db --db-use-ssl --db-connect postgresql://"${ANCHORE_DB_USER}":"${ANCHORE_DB_PASSWORD}"@"${ANCHORE_DB_HOST/:5432}:${ANCHORE_DB_PORT:-5432}"/"${ANCHORE_DB_NAME}"?sslmode={{- .Values.anchoreGlobal.dbConfig.sslMode }} upgrade --dontask; {{- else }} - | - anchore-manager db --db-use-ssl --db-connect postgresql://"${ANCHORE_DB_USER}":"${ANCHORE_DB_PASSWORD}"@"${ANCHORE_DB_HOST}"/"${ANCHORE_DB_NAME}"?sslmode={{- .Values.anchoreGlobal.dbConfig.sslMode -}}\&sslrootcert=/home/anchore/certs/{{- .Values.anchoreGlobal.dbConfig.sslRootCertName }} upgrade --dontask; + {{ print (include "doSourceFile" .) }} anchore-manager db --db-use-ssl --db-connect postgresql://"${ANCHORE_DB_USER}":"${ANCHORE_DB_PASSWORD}"@"${ANCHORE_DB_HOST/:5432}:${ANCHORE_DB_PORT:-5432}"/"${ANCHORE_DB_NAME}"?sslmode={{- .Values.anchoreGlobal.dbConfig.sslMode -}}\&sslrootcert=/home/anchore/certs/{{- .Values.anchoreGlobal.dbConfig.sslRootCertName }} upgrade --dontask; {{- end }} {{- if .Values.cloudsql.enabled }} - sql_proxy_pid=$(pgrep cloud_sql_proxy) && kill -INT $sql_proxy_pid; + - sql_proxy_pid=$(pgrep cloud_sql_proxy) && kill -INT $sql_proxy_pid; securityContext: + {{- with .Values.anchoreGlobal.containerSecurityContext }} + {{ toYaml . | nindent 10 }} + {{- end }} capabilities: add: - SYS_PTRACE + {{- else }} + {{- with .Values.anchoreGlobal.containerSecurityContext }} + securityContext: + {{ toYaml . | nindent 10 }} + {{- end }} {{- end }} envFrom: {{- if not .Values.inject_secrets_via_env }} - {{- if .Values.anchoreGlobal.existingSecret }} + {{- if .Values.anchoreGlobal.useExistingSecrets }} - secretRef: - name: {{ .Values.anchoreGlobal.existingSecret }} + name: {{ .Values.anchoreGlobal.existingSecretName }} {{- else }} - secretRef: name: {{ include "anchore-engine.fullname" . }} @@ -99,21 +131,22 @@ spec: name: {{ print (include "anchore-engine.fullname" .) "-admin-pass" }} {{- end }} {{- end }} - - configMapRef: - name: {{ template "anchore-engine.fullname" . 
}}-env env: {{- with .Values.anchoreGlobal.extraEnv }} {{- toYaml . | nindent 8 }} {{- end }} - {{- if (.Values.anchoreGlobal.certStoreSecretName) }} volumeMounts: + {{- if (.Values.anchoreGlobal.certStoreSecretName) }} - name: certs mountPath: /home/anchore/certs/ readOnly: true {{- end }} + {{- with .Values.anchoreGlobal.extraVolumeMounts }} + {{- toYaml . | nindent 8 }} + {{- end }} resources: {{ toYaml .Values.anchoreEngineUpgradeJob.resources | nindent 10 }} - {{- if or .Values.anchoreGlobal.certStoreSecretName .Values.cloudsql.useExistingServiceAcc }} + {{- if or .Values.anchoreGlobal.certStoreSecretName .Values.cloudsql.useExistingServiceAcc .Values.anchoreGlobal.extraVolumes }} volumes: {{- with .Values.anchoreGlobal.certStoreSecretName }} - name: certs @@ -125,6 +158,9 @@ spec: secret: secretName: {{ .Values.cloudsql.serviceAccSecretName }} {{- end }} + {{- with .Values.anchoreGlobal.extraVolumes }} + {{- toYaml . | nindent 6 }} + {{- end }} {{- end }} {{- with .Values.anchoreEngineUpgradeJob.nodeSelector }} nodeSelector: diff --git a/stable/anchore-engine/templates/enterprise_configmap.yaml b/stable/anchore-engine/templates/enterprise_configmap.yaml index 3d7e6f04..9f3da5f7 100644 --- a/stable/anchore-engine/templates/enterprise_configmap.yaml +++ b/stable/anchore-engine/templates/enterprise_configmap.yaml @@ -1,3 +1,4 @@ +{{- template "checkAnchoreEnterpriseCompatibility" . }} {{- if and .Values.anchoreEnterpriseGlobal.enabled (or .Values.anchoreEnterpriseRbac.enabled .Values.anchoreEnterpriseReports.enabled) -}} {{- $component := "enterprise" -}} apiVersion: v1 @@ -56,24 +57,36 @@ data: oauth: enabled: {{ .Values.anchoreGlobal.oauthEnabled }} default_token_expiration_seconds: {{ .Values.anchoreGlobal.oauthTokenExpirationSeconds }} + refresh_token_expiration_seconds: {{ .Values.anchoreGlobal.oauthRefreshTokenExpirationSeconds }} # Set this to True to enable storing user passwords only as secure hashes in the db. 
This can dramatically increase CPU usage if you # don't also use oauth and tokens for internal communications (which requires keys/secret to be configured as well) # WARNING: you should not change this after a system has been initialized as it may cause a mismatch in existing passwords hashed_passwords: {{ .Values.anchoreGlobal.hashedPasswords }} + # Set this to True in order to disable the SSO JIT provisioning during authentication. This provides an additional + # layer of security and configuration for SSO users to gain access to Anchore. This is disabled by default. + sso_require_existing_users: {{ .Values.anchoreGlobal.ssoRequireExistingUsers }} + credentials: database: - {{- if not .Values.anchoreGlobal.dbConfig.ssl }} - db_connect: "postgresql://${ANCHORE_DB_USER}:${ANCHORE_DB_PASSWORD}@${ANCHORE_DB_HOST}/${ANCHORE_DB_NAME}" - {{- else if eq .Values.anchoreGlobal.dbConfig.sslMode "require" }} - db_connect: "postgresql://${ANCHORE_DB_USER}:${ANCHORE_DB_PASSWORD}@${ANCHORE_DB_HOST}/${ANCHORE_DB_NAME}?sslmode={{- .Values.anchoreGlobal.dbConfig.sslMode -}}" - {{- else }} - db_connect: "postgresql://${ANCHORE_DB_USER}:${ANCHORE_DB_PASSWORD}@${ANCHORE_DB_HOST}/${ANCHORE_DB_NAME}?sslmode={{- .Values.anchoreGlobal.dbConfig.sslMode -}}&sslrootcert=/home/anchore/certs/{{- .Values.anchoreGlobal.dbConfig.sslRootCertName -}}" - {{- end }} + user: "${ANCHORE_DB_USER}" + password: "${ANCHORE_DB_PASSWORD}" + host: "${ANCHORE_DB_HOST}" + port: "${ANCHORE_DB_PORT}" + name: "${ANCHORE_DB_NAME}" + db_connect_args: timeout: {{ .Values.anchoreGlobal.dbConfig.timeout }} + {{- if .Values.anchoreGlobal.dbConfig.ssl }} + ssl: true + sslmode: {{ .Values.anchoreGlobal.dbConfig.sslMode -}} + {{- if not (eq .Values.anchoreGlobal.dbConfig.sslMode "require") }} + sslrootcert: /home/anchore/certs/{{- .Values.anchoreGlobal.dbConfig.sslRootCertName -}} + {{- end }} + {{- else }} ssl: false + {{- end }} db_pool_size: {{ .Values.anchoreGlobal.dbConfig.connectionPoolSize }} db_pool_max_overflow: 
{{ .Values.anchoreGlobal.dbConfig.connectionPoolMaxOverflow }} {{- with .Values.anchoreGlobal.dbConfig.engineArgs }} @@ -94,7 +107,7 @@ data: rbac_manager: enabled: true require_auth: true - endpoint_hostname: {{ template "anchore-engine.enterprise-rbac.fullname" . }} + endpoint_hostname: {{ template "anchore-engine.enterprise-rbac.serviceName" . }} listen: 0.0.0.0 port: {{ .Values.anchoreEnterpriseRbac.service.managerPort }} max_request_threads: {{ default 50 .Values.anchoreEnterpriseRbac.maxRequestThreads }} @@ -111,7 +124,7 @@ data: reports: enabled: true require_auth: true - endpoint_hostname: {{ template "anchore-engine.api.fullname" . }} + endpoint_hostname: {{ template "anchore-engine.api.serviceName" . }} listen: '0.0.0.0' port: {{ .Values.anchoreEnterpriseReports.service.apiPort }} max_request_threads: {{ default 50 .Values.anchoreApi.maxRequestThreads }} @@ -129,7 +142,7 @@ data: reports_worker: enabled: true require_auth: true - endpoint_hostname: {{ template "anchore-engine.enterprise-reports.fullname" . }} + endpoint_hostname: {{ template "anchore-engine.enterprise-reports.serviceName" . 
}} listen: '0.0.0.0' port: {{ .Values.anchoreEnterpriseReports.service.workerPort }} max_request_threads: {{ default 50 .Values.anchoreEnterpriseReports.maxRequestThreads }} @@ -140,6 +153,11 @@ data: data_load_max_workers: {{ .Values.anchoreEnterpriseReports.dataLoadMaxWorkers }} cycle_timers: {{- toYaml .Values.anchoreEnterpriseReports.cycleTimers | nindent 10 }} + runtime_report_generation: + inventory_images_by_vulnerability: true + vulnerabilities_by_k8s_namespace: {{ ne .Values.anchoreEnterpriseReports.vulnerabilitiesByK8sNamespace false }} + vulnerabilities_by_k8s_container: {{ ne .Values.anchoreEnterpriseReports.vulnerabilitiesByK8sContainer false }} + vulnerabilities_by_ecs_container: {{ ne .Values.anchoreEnterpriseReports.vulnerabilitiesByEcsContainer false }} {{- if .Values.anchoreEnterpriseRbac.enabled }} authorization_handler: external authorization_handler_config: @@ -155,7 +173,7 @@ data: notifications: enabled: true require_auth: true - endpoint_hostname: {{ template "anchore-engine.enterprise-notifications.fullname" . }} + endpoint_hostname: {{ template "anchore-engine.enterprise-notifications.serviceName" . }} listen: '0.0.0.0' port: {{ .Values.anchoreEnterpriseNotifications.service.port }} max_request_threads: {{ default 50 .Values.anchoreEnterpriseNotifications.maxRequestThreads }} @@ -167,7 +185,7 @@ data: {{- if .Values.anchoreEnterpriseNotifications.uiUrl }} ui_url: "{{ .Values.anchoreEnterpriseNotifications.uiUrl }}" {{- else }} - ui_url: {{ include "anchore-engine.enterprise-ui.fullname" . | quote }} + ui_url: {{ include "anchore-engine.enterprise-ui.serviceName" . 
| quote }} {{- end }} {{- end }} {{- end -}} diff --git a/stable/anchore-engine/templates/enterprise_feeds_configmap.yaml b/stable/anchore-engine/templates/enterprise_feeds_configmap.yaml index deae4f5b..9c9c9b8d 100644 --- a/stable/anchore-engine/templates/enterprise_feeds_configmap.yaml +++ b/stable/anchore-engine/templates/enterprise_feeds_configmap.yaml @@ -9,9 +9,9 @@ {{- $grypeProviderFeedsExternalURL = (printf "%s/v1/" $anchoreFeedsHost) -}} {{- else -}} {{- if .Values.anchoreGlobal.internalServicesSsl.enabled -}} - {{- $grypeProviderFeedsExternalURL = (printf "https://%s:%s/v1/" (include "anchore-engine.enterprise-feeds.fullname" .) (.Values.anchoreEnterpriseFeeds.service.port | toString) ) -}} + {{- $grypeProviderFeedsExternalURL = (printf "https://%s:%s/v1/" (include "anchore-engine.enterprise-feeds.serviceName" .) (.Values.anchoreEnterpriseFeeds.service.port | toString) ) -}} {{- else -}} - {{- $grypeProviderFeedsExternalURL = (printf "http://%s:%s/v1/" (include "anchore-engine.enterprise-feeds.fullname" .) (.Values.anchoreEnterpriseFeeds.service.port | toString) ) -}} + {{- $grypeProviderFeedsExternalURL = (printf "http://%s:%s/v1/" (include "anchore-engine.enterprise-feeds.serviceName" .) (.Values.anchoreEnterpriseFeeds.service.port | toString) ) -}} {{- end -}} {{- end -}} @@ -45,7 +45,7 @@ data: license_file: /home/anchore/license.yaml metrics: enabled: {{ .Values.anchoreGlobal.enableMetrics }} - auth_disabled: {{ .Values.anchoreGlobal.metricsAuthDisabled }} + auth_disabled: true # Locations for keys used for signing and encryption. Only one of 'secret' or 'public_key_path'/'private_key_path' needs to be set. If all are set then the keys take precedence over the secret value # Secret is for a shared secret and if set, all components in anchore should have the exact same value in their configs. 
@@ -65,24 +65,36 @@ data: oauth: enabled: {{ .Values.anchoreGlobal.oauthEnabled }} default_token_expiration_seconds: {{ .Values.anchoreGlobal.oauthTokenExpirationSeconds }} + refresh_token_expiration_seconds: {{ .Values.anchoreGlobal.oauthRefreshTokenExpirationSeconds }} # Set this to True to enable storing user passwords only as secure hashes in the db. This can dramatically increase CPU usage if you # don't also use oauth and tokens for internal communications (which requires keys/secret to be configured as well) # WARNING: you should not change this after a system has been initialized as it may cause a mismatch in existing passwords hashed_passwords: {{ .Values.anchoreGlobal.hashedPasswords }} + # Set this to True in order to disable the SSO JIT provisioning during authentication. This provides an additional + # layer of security and configuration for SSO users to gain access to Anchore. This is disabled by default. + sso_require_existing_users: {{ .Values.anchoreGlobal.ssoRequireExistingUsers }} + credentials: database: - {{- if not .Values.anchoreEnterpriseFeeds.dbConfig.ssl }} - db_connect: "postgresql://${ANCHORE_DB_USER}:${ANCHORE_FEEDS_DB_PASSWORD}@${ANCHORE_DB_HOST}/${ANCHORE_DB_NAME}" - {{- else if eq .Values.anchoreEnterpriseFeeds.dbConfig.sslMode "require" }} - db_connect: "postgresql://${ANCHORE_DB_USER}:${ANCHORE_FEEDS_DB_PASSWORD}@${ANCHORE_DB_HOST}/${ANCHORE_DB_NAME}?sslmode={{- .Values.anchoreEnterpriseFeeds.dbConfig.sslMode -}}" - {{- else }} - db_connect: "postgresql://${ANCHORE_DB_USER}:${ANCHORE_FEEDS_DB_PASSWORD}@${ANCHORE_DB_HOST}/${ANCHORE_DB_NAME}?sslmode={{- .Values.anchoreEnterpriseFeeds.dbConfig.sslMode -}}&sslrootcert=/home/anchore/certs/{{- .Values.anchoreEnterpriseFeeds.dbConfig.sslRootCertName }}" - {{- end }} + user: "${ANCHORE_FEEDS_DB_USER}" + password: "${ANCHORE_FEEDS_DB_PASSWORD}" + host: "${ANCHORE_FEEDS_DB_HOST}" + port: "${ANCHORE_FEEDS_DB_PORT}" + name: "${ANCHORE_FEEDS_DB_NAME}" + db_connect_args: timeout: {{ 
.Values.anchoreEnterpriseFeeds.dbConfig.timeout }} + {{- if .Values.anchoreGlobal.dbConfig.ssl }} + ssl: true + sslmode: {{ .Values.anchoreEnterpriseFeeds.dbConfig.sslMode -}} + {{- if not (eq .Values.anchoreEnterpriseFeeds.dbConfig.sslMode "require") }} + sslrootcert: /home/anchore/certs/{{- .Values.anchoreEnterpriseFeeds.dbConfig.sslRootCertName -}} + {{- end }} + {{- else }} ssl: false + {{- end }} db_pool_size: {{ .Values.anchoreEnterpriseFeeds.dbConfig.connectionPoolSize }} db_pool_max_overflow: {{ .Values.anchoreEnterpriseFeeds.dbConfig.connectionPoolMaxOverflow }} {{- with .Values.anchoreEnterpriseFeeds.dbConfig.engineArgs }} @@ -93,7 +105,7 @@ data: feeds: enabled: true require_auth: true - endpoint_hostname: {{ template "anchore-engine.enterprise-feeds.fullname" . }} + endpoint_hostname: {{ template "anchore-engine.enterprise-feeds.serviceName" . }} listen: 0.0.0.0 port: {{ .Values.anchoreEnterpriseFeeds.service.port }} max_request_threads: {{ default 50 .Values.anchoreEnterpriseFeeds.maxRequestThreads }} @@ -121,13 +133,23 @@ data: alpine: enabled: {{ default "true" (.Values.anchoreEnterpriseFeeds.alpineDriverEnabled | quote) }} centos: - enabled: {{ default "true" (.Values.anchoreEnterpriseFeeds.centosDriverEnabled | quote) }} + enabled: false debian: enabled: {{ default "true" (.Values.anchoreEnterpriseFeeds.debianDriverEnabled | quote) }} + {{- if .Values.anchoreEnterpriseFeeds.debianExtraReleases }} + releases: + {{- toYaml .Values.anchoreEnterpriseFeeds.debianExtraReleases | nindent 14 }} + {{- end }} ol: enabled: {{ default "true" (.Values.anchoreEnterpriseFeeds.olDriverEnabled | quote) }} ubuntu: enabled: {{ default "true" (.Values.anchoreEnterpriseFeeds.ubuntuDriverEnabled | quote) }} + {{- if .Values.anchoreEnterpriseFeeds.ubuntuExtraReleases }} + releases: + {{- toYaml .Values.anchoreEnterpriseFeeds.ubuntuExtraReleases | nindent 14 }} + {{- end }} + git_url: {{ (.Values.anchoreEnterpriseFeeds.ubuntuDriverGitUrl | quote) }} + git_branch: {{ 
(.Values.anchoreEnterpriseFeeds.ubuntuDriverGitBranch | quote) }} rhel: enabled: {{ default "true" (.Values.anchoreEnterpriseFeeds.rhelDriverEnabled | quote) }} concurrency: {{ .Values.anchoreEnterpriseFeeds.rhelDriverConcurrency }} @@ -141,14 +163,17 @@ data: # To enable gem driver comment the enabled property and uncomment the db_connect property. enabled: {{ .Values.anchoreEnterpriseFeeds.gemDriverEnabled | quote }} {{- if .Values.anchoreEnterpriseFeeds.gemDriverEnabled }} - db_connect: "postgresql://${ANCHORE_GEM_DB_USER}:${ANCHORE_GEM_DB_PASSWORD}@${ANCHORE_GEM_DB_HOST}/${ANCHORE_GEM_DB_NAME}" + db_connect: "postgresql://${ANCHORE_GEM_DB_USER}:${ANCHORE_GEM_DB_PASSWORD}@${ANCHORE_GEM_DB_HOST}:${ANCHORE_GEM_DB_PORT}/${ANCHORE_GEM_DB_NAME}" {{- end }} nvdv2: enabled: {{ default "true" (.Values.anchoreEnterpriseFeeds.nvdv2DriverEnabled | quote) }} - vulndb: - enabled: {{ default "true" (.Values.anchoreEnterpriseFeeds.vulndbDriverEnabled | quote) }} + {{- if .Values.anchoreEnterpriseFeeds.useNvdDriverApiKey }} + api_key: ${ANCHORE_NVD_API_KEY} + {{- end }} sles: enabled: {{ default "true" (.Values.anchoreEnterpriseFeeds.slesDriverEnabled | quote) }} + mariner: + enabled: {{ default "true" (.Values.anchoreEnterpriseFeeds.marinerDriverEnabled) }} msrc: enabled: {{ .Values.anchoreEnterpriseFeeds.msrcDriverEnabled | quote }} {{- with .Values.anchoreEnterpriseFeeds.msrcWhitelist }} @@ -163,6 +188,17 @@ data: grypedb: enabled: {{ default "true" (.Values.anchoreEnterpriseFeeds.grypeDriverEnabled | quote) }} external_feeds_url: {{ $grypeProviderFeedsExternalURL }} + preload: + enabled: {{ default true (.Values.anchoreEnterpriseFeeds.grypedbPreloadEnabled) }} + workspace_archive_path: {{ default "/preload/grype-db-workspace.tar.gz" (.Values.anchoreEnterpriseFeeds.grypedbPreloadWorkspaceArchivePath) }} + persist_provider_workspaces: {{ default true (.Values.anchoreEnterpriseFeeds.grypedbPersistProviderWorkspaces) }} + restore_provider_workspaces: {{ default true 
(.Values.anchoreEnterpriseFeeds.grypedbRestoreProviderWorkspaces) }} + anchore_match_exclusions: + enabled: {{ default "true" (.Values.anchoreEnterpriseFeeds.anchoreMatchExclusionsEnabled | quote) }} + wolfi: + enabled: {{ default "true" (.Values.anchoreEnterpriseFeeds.wolfiDriverEnabled | quote) }} + chainguard: + enabled: {{ default true (.Values.anchoreEnterpriseFeeds.chainguardDriverEnabled) }} {{- if .Values.anchoreGlobal.internalServicesSsl.enabled }} ssl_enable: {{ .Values.anchoreGlobal.internalServicesSsl.enabled }} ssl_cert: "/home/anchore/certs/{{- .Values.anchoreGlobal.internalServicesSsl.certSecretCertName }}" diff --git a/stable/anchore-engine/templates/enterprise_feeds_configmap_env.yaml b/stable/anchore-engine/templates/enterprise_feeds_configmap_env.yaml deleted file mode 100644 index 7aaabbc5..00000000 --- a/stable/anchore-engine/templates/enterprise_feeds_configmap_env.yaml +++ /dev/null @@ -1,31 +0,0 @@ -{{- if and .Values.anchoreEnterpriseGlobal.enabled .Values.anchoreEnterpriseFeeds.enabled -}} -{{- $component := "enterprise-feeds" -}} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ template "anchore-engine.enterprise-feeds.fullname" . }}-env - labels: - app: {{ template "anchore-engine.fullname" . }} - chart: {{ .Chart.Name }}-{{ .Chart.Version }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} - component: {{ $component }} - {{- with .Values.anchoreGlobal.labels }} - {{ toYaml . 
| nindent 4 }} - {{- end }} -data: - ANCHORE_DB_NAME: {{ index .Values "anchore-feeds-db" "postgresDatabase" | quote }} - ANCHORE_DB_USER: {{ index .Values "anchore-feeds-db" "postgresUser" | quote }} - {{- if and (index .Values "anchore-feeds-db" "externalEndpoint") (not (index .Values "anchore-feeds-db" "enabled")) }} - ANCHORE_DB_HOST: {{ index .Values "anchore-feeds-db" "externalEndpoint" | quote }} - {{- else if and (index .Values "cloudsql" "enabled") (not (index .Values "anchore-feeds-db" "enabled")) }} - ANCHORE_DB_HOST: "localhost:5432" - {{- else }} - ANCHORE_DB_HOST: "{{ template "postgres.anchore-feeds-db.fullname" . }}:5432" - {{- end }} - {{- if .Values.anchoreEnterpriseFeeds.gemDriverEnabled }} - ANCHORE_GEM_DB_NAME: {{ index .Values "anchore-feeds-gem-db" "postgresDatabase" | quote }} - ANCHORE_GEM_DB_USER: {{ index .Values "anchore-feeds-gem-db" "postgresUser" | quote }} - ANCHORE_GEM_DB_HOST: "{{ default (include "postgres.anchore-feeds-gem-db.fullname" .) (index .Values "anchore-feeds-gem-db" "externalEndpoint") }}:5432" - {{- end }} -{{- end }} diff --git a/stable/anchore-engine/templates/enterprise_feeds_deployment.yaml b/stable/anchore-engine/templates/enterprise_feeds_deployment.yaml index 47ed57a9..b8e4d615 100644 --- a/stable/anchore-engine/templates/enterprise_feeds_deployment.yaml +++ b/stable/anchore-engine/templates/enterprise_feeds_deployment.yaml @@ -52,10 +52,9 @@ spec: {{- with .Values.anchoreEnterpriseFeeds.annotations }} {{ toYaml . | nindent 8 }} {{- end }} - {{- if not (or .Values.inject_secrets_via_env .Values.anchoreEnterpriseFeeds.existingSecret) }} + {{- if not (or .Values.inject_secrets_via_env .Values.anchoreGlobal.useExistingSecrets) }} checksum/secrets: {{ include (print $.Template.BasePath "/enterprise_feeds_secret.yaml") . | sha256sum }} {{- end }} - checksum/env: {{ include (print $.Template.BasePath "/enterprise_feeds_configmap_env.yaml") . 
| sha256sum }} checksum/feeds-config: {{ include (print $.Template.BasePath "/enterprise_feeds_configmap.yaml") . | sha256sum }} spec: {{- with .Values.anchoreGlobal.securityContext }} @@ -64,27 +63,47 @@ spec: {{- end }} imagePullSecrets: - name: {{ .Values.anchoreEnterpriseGlobal.imagePullSecretName }} - {{- if and .Values.anchoreGlobal.scratchVolume.fixGroupPermissions .Values.anchoreGlobal.securityContext.fsGroup }} + {{- if and (or .Values.anchoreGlobal.scratchVolume.fixGroupPermissions .Values.anchoreEnterpriseFeeds.persistence.fixGroupPermissions) .Values.anchoreGlobal.securityContext.fsGroup }} initContainers: - name: mode-fixer - image: alpine + image: {{ .Values.anchoreGlobal.scratchVolume.initContainerImage }} securityContext: runAsUser: 0 volumeMounts: + {{- if .Values.anchoreGlobal.scratchVolume.fixGroupPermissions }} - name: {{ $component }}-scratch mountPath: {{ .Values.anchoreGlobal.scratchVolume.mountPath }} - command: - - sh - - -c + {{- end }} + {{- if .Values.anchoreEnterpriseFeeds.persistence.fixGroupPermissions }} + - name: data + mountPath: {{ .Values.anchoreEnterpriseFeeds.persistence.mountPath }} + subPath: {{ .Values.anchoreEnterpriseFeeds.persistence.subPath }} + {{- end }} + command: ["/bin/sh", "-c"] + args: + {{- if .Values.anchoreGlobal.scratchVolume.fixGroupPermissions }} - (chmod 0775 {{ .Values.anchoreGlobal.scratchVolume.mountPath }}; chgrp {{ .Values.anchoreGlobal.securityContext.fsGroup }} {{ .Values.anchoreGlobal.scratchVolume.mountPath }} ) + {{- end }} + {{- if .Values.anchoreEnterpriseFeeds.persistence.fixGroupPermissions }} + - (chmod 0775 {{ .Values.anchoreEnterpriseFeeds.persistence.mountPath }}; chgrp {{ .Values.anchoreGlobal.securityContext.fsGroup }} {{ .Values.anchoreEnterpriseFeeds.persistence.mountPath }} ) + {{- end }} {{- end }} containers: {{- if .Values.cloudsql.enabled }} - name: cloudsql-proxy image: {{ .Values.cloudsql.image.repository }}:{{ .Values.cloudsql.image.tag }} imagePullPolicy: {{ 
.Values.cloudsql.image.pullPolicy }} + {{- with .Values.anchoreGlobal.containerSecurityContext }} + securityContext: + {{ toYaml . | nindent 10 }} + {{- end }} command: ["/cloud_sql_proxy"] args: + {{- if .Values.cloudsql.extraArgs }} + {{- range $arg := .Values.cloudsql.extraArgs }} + - {{ quote $arg }} + {{- end }} + {{- end }} - "-instances={{ .Values.cloudsql.instance }}=tcp:5432" {{- if .Values.cloudsql.useExistingServiceAcc }} - "-credential_file=/var/{{ .Values.cloudsql.serviceAccSecretName }}/{{ .Values.cloudsql.serviceAccJsonName }}" @@ -97,15 +116,21 @@ spec: - name: "{{ .Chart.Name }}-{{ $component }}" image: {{ .Values.anchoreEnterpriseGlobal.image }} imagePullPolicy: {{ .Values.anchoreEnterpriseGlobal.imagePullPolicy }} - args: ["anchore-enterprise-manager", "service", "start", "--no-auto-upgrade", "feeds"] + {{- with .Values.anchoreGlobal.containerSecurityContext }} + securityContext: + {{ toYaml . | nindent 10 }} + {{- end }} + command: ["/bin/sh", "-c"] + args: + - {{ print (include "doSourceFile" .) }} /docker-entrypoint.sh anchore-enterprise-manager service start --no-auto-upgrade feeds ports: - containerPort: {{ .Values.anchoreEnterpriseFeeds.service.port }} name: feeds-api envFrom: {{- if not .Values.inject_secrets_via_env }} - {{- if .Values.anchoreEnterpriseFeeds.existingSecret }} + {{- if .Values.anchoreGlobal.useExistingSecrets }} - secretRef: - name: {{ .Values.anchoreEnterpriseFeeds.existingSecret }} + name: {{ .Values.anchoreEnterpriseFeeds.existingSecretName }} {{- else }} - secretRef: name: {{ include "anchore-engine.enterprise-feeds.fullname" . }} @@ -113,8 +138,6 @@ spec: name: {{ print (include "anchore-engine.fullname" .) "-admin-pass" }} {{- end }} {{- end }} - - configMapRef: - name: {{ template "anchore-engine.enterprise-feeds.fullname" . }}-env env: {{- with .Values.anchoreGlobal.extraEnv }} {{- toYaml . 
| nindent 8 }} @@ -151,6 +174,9 @@ spec: mountPath: /home/anchore/certs/ readOnly: true {{- end }} + {{- with .Values.anchoreGlobal.extraVolumeMounts }} + {{- toYaml . | nindent 8 }} + {{- end }} livenessProbe: httpGet: path: /health @@ -214,6 +240,9 @@ spec: secret: secretName: {{ . }} {{- end }} + {{- with .Values.anchoreGlobal.extraVolumes }} + {{- toYaml . | nindent 8 }} + {{- end }} {{- with .Values.anchoreEnterpriseFeeds.nodeSelector }} nodeSelector: {{ toYaml . | nindent 8 }} @@ -226,7 +255,7 @@ spec: tolerations: {{ toYaml . | nindent 8 }} {{- end }} - {{- with .Values.anchoreGlobal.serviceAccountName }} + {{- with include "anchore-engine.enterprise-feeds.serviceAccountName" . }} serviceAccountName: {{ . }} {{- end }} @@ -234,7 +263,7 @@ spec: apiVersion: v1 kind: Service metadata: - name: {{ template "anchore-engine.enterprise-feeds.fullname" . }} + name: {{ template "anchore-engine.enterprise-feeds.serviceName" . }} labels: app: {{ template "anchore-engine.fullname" . }} chart: {{ .Chart.Name }}-{{ .Chart.Version }} @@ -258,6 +287,9 @@ spec: port: {{ .Values.anchoreEnterpriseFeeds.service.port }} targetPort: {{ .Values.anchoreEnterpriseFeeds.service.port }} protocol: TCP + {{- with .Values.anchoreEnterpriseFeeds.service.nodePort }} + nodePort: {{ . }} + {{- end }} selector: app: {{ template "anchore-engine.fullname" . }} component: {{ $component }} diff --git a/stable/anchore-engine/templates/enterprise_feeds_secret.yaml b/stable/anchore-engine/templates/enterprise_feeds_secret.yaml index 86da6f50..80a7778c 100644 --- a/stable/anchore-engine/templates/enterprise_feeds_secret.yaml +++ b/stable/anchore-engine/templates/enterprise_feeds_secret.yaml @@ -1,4 +1,5 @@ -{{- if not .Values.anchoreEnterpriseFeeds.existingSecret }} +{{- template "checkUpgradeForExistingSecret" . 
}} +{{- if not .Values.anchoreGlobal.useExistingSecrets }} {{- if and .Values.anchoreEnterpriseGlobal.enabled .Values.anchoreEnterpriseFeeds.enabled }} apiVersion: v1 kind: Secret @@ -14,7 +15,11 @@ metadata: {{- end }} type: Opaque stringData: + ANCHORE_FEEDS_DB_NAME: {{ index .Values "anchore-feeds-db" "postgresDatabase" | quote }} + ANCHORE_FEEDS_DB_USER: {{ index .Values "anchore-feeds-db" "postgresUser" | quote }} ANCHORE_FEEDS_DB_PASSWORD: {{ index .Values "anchore-feeds-db" "postgresPassword" | quote }} + ANCHORE_FEEDS_DB_HOST: "{{ template "feeds-db-hostname" . }}" + ANCHORE_FEEDS_DB_PORT: {{ index .Values "anchore-feeds-db" "postgresPort" | quote }} {{- with .Values.anchoreGlobal.saml.secret }} ANCHORE_SAML_SECRET: {{ . }} {{- end }} @@ -24,8 +29,15 @@ stringData: {{- with .Values.anchoreEnterpriseFeeds.githubDriverToken }} ANCHORE_GITHUB_TOKEN: {{ . | quote }} {{- end }} + {{- with .Values.anchoreEnterpriseFeeds.nvdDriverApiKey }} + ANCHORE_NVD_API_KEY: {{ . | quote }} + {{- end }} {{- if .Values.anchoreEnterpriseFeeds.gemDriverEnabled }} + ANCHORE_GEM_DB_NAME: {{ index .Values "anchore-feeds-gem-db" "postgresDatabase" | quote }} + ANCHORE_GEM_DB_USER: {{ index .Values "anchore-feeds-gem-db" "postgresUser" | quote }} ANCHORE_GEM_DB_PASSWORD: {{ index .Values "anchore-feeds-gem-db" "postgresPassword" | quote }} + ANCHORE_GEM_DB_HOST: "{{ default (include "postgres.anchore-feeds-gem-db.fullname" .) 
(index .Values "anchore-feeds-gem-db" "externalEndpoint") }}" + ANCHORE_GEM_DB_PORT: {{ index .Values "anchore-feeds-gem-db" "postgresPort" | quote }} {{- end }} {{- end }} {{- end }} diff --git a/stable/anchore-engine/templates/enterprise_feeds_upgrade_job.yaml b/stable/anchore-engine/templates/enterprise_feeds_upgrade_job.yaml index 1258be81..03473ec1 100644 --- a/stable/anchore-engine/templates/enterprise_feeds_upgrade_job.yaml +++ b/stable/anchore-engine/templates/enterprise_feeds_upgrade_job.yaml @@ -1,4 +1,5 @@ -{{- if and .Values.anchoreEnterpriseGlobal.enabled .Values.anchoreEnterpriseFeeds.enabled .Values.anchoreEnterpriseFeedsUpgradeJob.enabled }} +{{- template "checkUpgradeCompatibility" . }} +{{- if and .Values.anchoreEnterpriseGlobal.enabled .Values.anchoreEnterpriseFeeds.enabled .Values.anchoreEnterpriseFeedsUpgradeJob.enabled (not .Values.anchoreGlobal.usePreupgradeHook) }} apiVersion: batch/v1 kind: Job metadata: @@ -8,6 +9,12 @@ metadata: app.kubernetes.io/instance: {{ .Release.Name | quote }} app.kubernetes.io/version: {{ .Chart.AppVersion }} helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + {{- with .Values.anchoreEnterpriseFeedsUpgradeJob.labels }} + {{ toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.anchoreGlobal.labels }} + {{ toYaml . | nindent 4 }} + {{- end }} annotations: "helm.sh/hook": post-upgrade "helm.sh/hook-weight": "-3" @@ -19,6 +26,14 @@ spec: app.kubernetes.io/managed-by: {{ .Release.Service | quote }} app.kubernetes.io/instance: {{ .Release.Name | quote }} helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app: {{ template "anchore-engine.fullname" . }} + component: anchore-enterprise-feeds-upgrade + {{- with .Values.anchoreEnterpriseFeedsUpgradeJob.labels }} + {{ toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.anchoreGlobal.labels }} + {{ toYaml . | nindent 8 }} + {{- end }} annotations: {{- with .Values.anchoreGlobal.annotations }} {{ toYaml . 
| nindent 8 }} @@ -42,8 +57,17 @@ spec: - name: cloudsql-proxy image: {{ .Values.cloudsql.image.repository }}:{{ .Values.cloudsql.image.tag }} imagePullPolicy: {{ .Values.cloudsql.image.pullPolicy }} + {{- with .Values.anchoreGlobal.containerSecurityContext }} + securityContext: + {{ toYaml . | nindent 10 }} + {{- end }} command: ["/cloud_sql_proxy"] args: + {{- if .Values.cloudsql.extraArgs }} + {{- range $arg := .Values.cloudsql.extraArgs }} + - {{ quote $arg }} + {{- end }} + {{- end }} - "-instances={{ .Values.cloudsql.instance }}=tcp:5432" {{- if .Values.cloudsql.useExistingServiceAcc }} - "-credential_file=/var/{{ .Values.cloudsql.serviceAccSecretName }}/{{ .Values.cloudsql.serviceAccJsonName }}" @@ -60,26 +84,34 @@ spec: args: {{- if not .Values.anchoreEnterpriseFeeds.dbConfig.ssl }} - | - anchore-enterprise-manager db --db-connect postgresql://"${ANCHORE_DB_USER}":"${ANCHORE_FEEDS_DB_PASSWORD}"@"${ANCHORE_DB_HOST}"/"${ANCHORE_DB_NAME}" upgrade --dontask; + {{ print (include "doSourceFile" .) }} anchore-enterprise-manager db --db-connect postgresql://"${ANCHORE_FEEDS_DB_USER}":"${ANCHORE_FEEDS_DB_PASSWORD}"@"${ANCHORE_FEEDS_DB_HOST/:5432}:${ANCHORE_FEEDS_DB_PORT:-5432}"/"${ANCHORE_FEEDS_DB_NAME}" upgrade --dontask; anchore-enterprise-manager db --db-connect postgresql://"${ANCHORE_FEEDS_DB_USER}":"${ANCHORE_FEEDS_DB_PASSWORD}"@"${ANCHORE_FEEDS_DB_HOST/:5432}:${ANCHORE_FEEDS_DB_PORT:-5432}"/"${ANCHORE_FEEDS_DB_NAME}" pre-upgrade-check; {{- else if eq .Values.anchoreEnterpriseFeeds.dbConfig.sslMode "require" }} - | - anchore-enterprise-manager db --db-use-ssl --db-connect postgresql://"${ANCHORE_DB_USER}":"${ANCHORE_FEEDS_DB_PASSWORD}"@"${ANCHORE_DB_HOST}"/"${ANCHORE_DB_NAME}"?sslmode={{- .Values.anchoreEnterpriseFeeds.dbConfig.sslMode }} upgrade --dontask; + {{ print (include "doSourceFile" .) 
}} anchore-enterprise-manager db --db-use-ssl --db-connect postgresql://"${ANCHORE_FEEDS_DB_USER}":"${ANCHORE_FEEDS_DB_PASSWORD}"@"${ANCHORE_FEEDS_DB_HOST/:5432}:${ANCHORE_FEEDS_DB_PORT:-5432}"/"${ANCHORE_FEEDS_DB_NAME}"?sslmode={{- .Values.anchoreEnterpriseFeeds.dbConfig.sslMode }} upgrade --dontask; anchore-enterprise-manager db --db-use-ssl --db-connect postgresql://"${ANCHORE_FEEDS_DB_USER}":"${ANCHORE_FEEDS_DB_PASSWORD}"@"${ANCHORE_FEEDS_DB_HOST/:5432}:${ANCHORE_FEEDS_DB_PORT:-5432}"/"${ANCHORE_FEEDS_DB_NAME}"?sslmode={{- .Values.anchoreEnterpriseFeeds.dbConfig.sslMode }} pre-upgrade-check; {{- else }} - | - anchore-enterprise-manager db --db-use-ssl --db-connect postgresql://"${ANCHORE_DB_USER}":"${ANCHORE_FEEDS_DB_PASSWORD}"@"${ANCHORE_DB_HOST}"/"${ANCHORE_DB_NAME}"?sslmode={{- .Values.anchoreEnterpriseFeeds.dbConfig.sslMode -}}\&sslrootcert=/home/anchore/certs/{{- .Values.anchoreEnterpriseFeeds.dbConfig.sslRootCertName }} upgrade --dontask; + {{ print (include "doSourceFile" .) }} anchore-enterprise-manager db --db-use-ssl --db-connect postgresql://"${ANCHORE_FEEDS_DB_USER}":"${ANCHORE_FEEDS_DB_PASSWORD}"@"${ANCHORE_FEEDS_DB_HOST/:5432}:${ANCHORE_FEEDS_DB_PORT:-5432}"/"${ANCHORE_FEEDS_DB_NAME}"?sslmode={{- .Values.anchoreEnterpriseFeeds.dbConfig.sslMode -}}\&sslrootcert=/home/anchore/certs/{{- .Values.anchoreEnterpriseFeeds.dbConfig.sslRootCertName }} upgrade --dontask; anchore-enterprise-manager db --db-use-ssl --db-connect postgresql://"${ANCHORE_FEEDS_DB_USER}":"${ANCHORE_FEEDS_DB_PASSWORD}"@"${ANCHORE_FEEDS_DB_HOST/:5432}:${ANCHORE_FEEDS_DB_PORT:-5432}"/"${ANCHORE_FEEDS_DB_NAME}"?sslmode={{- .Values.anchoreEnterpriseFeeds.dbConfig.sslMode -}}\&sslrootcert=/home/anchore/certs/{{- .Values.anchoreEnterpriseFeeds.dbConfig.sslRootCertName }} pre-upgrade-check; {{- end }} {{- if .Values.cloudsql.enabled }} - sql_proxy_pid=$(pgrep cloud_sql_proxy) && kill -INT $sql_proxy_pid; + - sql_proxy_pid=$(pgrep cloud_sql_proxy) && kill -INT $sql_proxy_pid; 
securityContext: + {{- with .Values.anchoreGlobal.containerSecurityContext }} + {{ toYaml . | nindent 10 }} + {{- end }} capabilities: add: - SYS_PTRACE + {{- else }} + {{- with .Values.anchoreGlobal.containerSecurityContext }} + securityContext: + {{ toYaml . | nindent 10 }} + {{- end }} {{- end }} envFrom: {{- if not .Values.inject_secrets_via_env }} - {{- if .Values.anchoreEnterpriseFeeds.existingSecret }} + {{- if .Values.anchoreGlobal.useExistingSecrets }} - secretRef: - name: {{ .Values.anchoreEnterpriseFeeds.existingSecret }} + name: {{ .Values.anchoreEnterpriseFeeds.existingSecretName }} {{- else }} - secretRef: name: {{ include "anchore-engine.enterprise-feeds.fullname" . }} @@ -87,8 +119,6 @@ spec: name: {{ print (include "anchore-engine.fullname" .) "-admin-pass" }} {{- end }} {{- end }} - - configMapRef: - name: {{ template "anchore-engine.enterprise-feeds.fullname" . }}-env env: {{- with .Values.anchoreGlobal.extraEnv }} {{- toYaml . | nindent 8 }} @@ -96,15 +126,18 @@ spec: {{- with .Values.anchoreEnterpriseFeeds.extraEnv }} {{- toYaml . | nindent 8 }} {{- end }} - {{- if (.Values.anchoreGlobal.certStoreSecretName) }} volumeMounts: + {{- if (.Values.anchoreGlobal.certStoreSecretName) }} - name: certs mountPath: /home/anchore/certs/ readOnly: true {{- end }} + {{- with .Values.anchoreGlobal.extraVolumeMounts }} + {{- toYaml . | nindent 8 }} + {{- end }} resources: {{ toYaml .Values.anchoreEnterpriseFeedsUpgradeJob.resources | nindent 10 }} - {{- if or .Values.anchoreGlobal.certStoreSecretName .Values.cloudsql.useExistingServiceAcc }} + {{- if or .Values.anchoreGlobal.certStoreSecretName .Values.cloudsql.useExistingServiceAcc .Values.anchoreGlobal.extraVolumes }} volumes: {{- with .Values.anchoreGlobal.certStoreSecretName }} - name: certs @@ -116,6 +149,9 @@ spec: secret: secretName: {{ .Values.cloudsql.serviceAccSecretName }} {{- end }} + {{- with .Values.anchoreGlobal.extraVolumes }} + {{- toYaml . 
| nindent 6 }} + {{- end }} {{- end }} {{- with .Values.anchoreEnterpriseFeedsUpgradeJob.nodeSelector }} nodeSelector: diff --git a/stable/anchore-engine/templates/enterprise_notifications_deployment.yaml b/stable/anchore-engine/templates/enterprise_notifications_deployment.yaml index 4cc03314..d14ea071 100644 --- a/stable/anchore-engine/templates/enterprise_notifications_deployment.yaml +++ b/stable/anchore-engine/templates/enterprise_notifications_deployment.yaml @@ -55,7 +55,6 @@ spec: {{- if not .Values.inject_secrets_via_env }} checksum/secrets: {{ include (print $.Template.BasePath "/engine_secret.yaml") . | sha256sum }} {{- end }} - checksum/env: {{ include (print $.Template.BasePath "/engine_configmap_env.yaml") . | sha256sum }} checksum/config: {{ include (print $.Template.BasePath "/engine_configmap.yaml") . | sha256sum }} spec: {{- with .Values.anchoreGlobal.securityContext }} @@ -69,8 +68,17 @@ spec: - name: cloudsql-proxy image: {{ .Values.cloudsql.image.repository }}:{{ .Values.cloudsql.image.tag }} imagePullPolicy: {{ .Values.cloudsql.image.pullPolicy }} + {{- with .Values.anchoreGlobal.containerSecurityContext }} + securityContext: + {{ toYaml . | nindent 10 }} + {{- end }} command: ["/cloud_sql_proxy"] args: + {{- if .Values.cloudsql.extraArgs }} + {{- range $arg := .Values.cloudsql.extraArgs }} + - {{ quote $arg }} + {{- end }} + {{- end }} - "-instances={{ .Values.cloudsql.instance }}=tcp:5432" {{- if .Values.cloudsql.useExistingServiceAcc }} - "-credential_file=/var/{{ .Values.cloudsql.serviceAccSecretName }}/{{ .Values.cloudsql.serviceAccJsonName }}" @@ -83,15 +91,21 @@ spec: - name: "{{ .Chart.Name }}-{{ $component }}" image: {{ .Values.anchoreEnterpriseGlobal.image }} imagePullPolicy: {{ .Values.anchoreEnterpriseGlobal.imagePullPolicy }} - args: ["anchore-enterprise-manager", "service", "start", "--no-auto-upgrade", "notifications"] + {{- with .Values.anchoreGlobal.containerSecurityContext }} + securityContext: + {{ toYaml . 
| nindent 10 }} + {{- end }} + command: ["/bin/sh", "-c"] + args: + - {{ print (include "doSourceFile" .) }} /docker-entrypoint.sh anchore-enterprise-manager service start --no-auto-upgrade notifications ports: - containerPort: {{ .Values.anchoreEnterpriseNotifications.service.port }} name: notify envFrom: {{- if not .Values.inject_secrets_via_env }} - {{- if .Values.anchoreGlobal.existingSecret }} + {{- if .Values.anchoreGlobal.useExistingSecrets }} - secretRef: - name: {{ .Values.anchoreGlobal.existingSecret }} + name: {{ .Values.anchoreGlobal.existingSecretName }} {{- else }} - secretRef: name: {{ include "anchore-engine.fullname" . }} @@ -99,8 +113,6 @@ spec: name: {{ print (include "anchore-engine.fullname" .) "-admin-pass" }} {{- end }} {{- end }} - - configMapRef: - name: {{ template "anchore-engine.fullname" . }}-env env: {{- with .Values.anchoreGlobal.extraEnv }} {{- toYaml . | nindent 8 }} @@ -132,6 +144,9 @@ spec: mountPath: /home/anchore/certs/ readOnly: true {{- end }} + {{- with .Values.anchoreGlobal.extraVolumeMounts }} + {{- toYaml . | nindent 8 }} + {{- end }} livenessProbe: httpGet: path: /health @@ -161,12 +176,18 @@ spec: - name: {{ .Chart.Name }}-rbac-authorizer image: {{ .Values.anchoreEnterpriseGlobal.image }} imagePullPolicy: {{ .Values.anchoreEnterpriseGlobal.imagePullPolicy }} - args: ["anchore-enterprise-manager", "service", "start", "--no-auto-upgrade", "rbac_authorizer"] + {{- with .Values.anchoreGlobal.containerSecurityContext }} + securityContext: + {{ toYaml . | nindent 10 }} + {{- end }} + command: ["/bin/sh", "-c"] + args: + - {{ print (include "doSourceFile" .) 
}} /docker-entrypoint.sh anchore-enterprise-manager service start --no-auto-upgrade rbac_authorizer envFrom: {{- if not .Values.inject_secrets_via_env }} - {{- if .Values.anchoreGlobal.existingSecret }} + {{- if .Values.anchoreGlobal.useExistingSecrets }} - secretRef: - name: {{ .Values.anchoreGlobal.existingSecret }} + name: {{ .Values.anchoreGlobal.existingSecretName }} {{- else }} - secretRef: name: {{ include "anchore-engine.fullname" . }} @@ -174,8 +195,6 @@ spec: name: {{ print (include "anchore-engine.fullname" .) "-admin-pass" }} {{- end }} {{- end }} - - configMapRef: - name: {{ template "anchore-engine.fullname" . }}-env env: {{- with .Values.anchoreGlobal.extraEnv }} {{- toYaml . | nindent 8 }} @@ -210,6 +229,9 @@ spec: - name: run mountPath: /var/run/anchore {{- end }} + {{- with .Values.anchoreGlobal.extraVolumeMounts }} + {{- toYaml . | nindent 8 }} + {{- end }} livenessProbe: exec: command: @@ -259,6 +281,9 @@ spec: secret: secretName: {{ . }} {{- end }} + {{- with .Values.anchoreGlobal.extraVolumes }} + {{- toYaml . | nindent 8 }} + {{- end }} {{- with .Values.anchoreEnterpriseNotifications.nodeSelector }} nodeSelector: {{ toYaml . | nindent 8 }} @@ -271,7 +296,7 @@ spec: tolerations: {{ toYaml . | nindent 8 }} {{- end }} - {{- with .Values.anchoreGlobal.serviceAccountName }} + {{- with include "anchore-engine.enterprise-notifications.serviceAccountName" . }} serviceAccountName: {{ . }} {{- end }} @@ -279,7 +304,7 @@ spec: apiVersion: v1 kind: Service metadata: - name: {{ template "anchore-engine.enterprise-notifications.fullname" . }} + name: {{ template "anchore-engine.enterprise-notifications.serviceName" . }} labels: app: {{ template "anchore-engine.fullname" . 
}} chart: {{ .Chart.Name }}-{{ .Chart.Version }} @@ -303,7 +328,10 @@ spec: port: {{ .Values.anchoreEnterpriseNotifications.service.port }} targetPort: {{ .Values.anchoreEnterpriseNotifications.service.port }} protocol: TCP + {{- with .Values.anchoreEnterpriseNotifications.service.nodePort }} + nodePort: {{ . }} + {{- end }} selector: app: {{ template "anchore-engine.fullname" . }} component: {{ $component }} -{{- end -}} \ No newline at end of file +{{- end -}} diff --git a/stable/anchore-engine/templates/enterprise_rbac_manager_deployment.yaml b/stable/anchore-engine/templates/enterprise_rbac_manager_deployment.yaml index b5027ed0..2002877a 100644 --- a/stable/anchore-engine/templates/enterprise_rbac_manager_deployment.yaml +++ b/stable/anchore-engine/templates/enterprise_rbac_manager_deployment.yaml @@ -55,7 +55,6 @@ spec: {{- if not .Values.inject_secrets_via_env }} checksum/secrets: {{ include (print $.Template.BasePath "/engine_secret.yaml") . | sha256sum }} {{- end }} - checksum/env: {{ include (print $.Template.BasePath "/engine_configmap_env.yaml") . | sha256sum }} checksum/config: {{ include (print $.Template.BasePath "/engine_configmap.yaml") . | sha256sum }} spec: {{- with .Values.anchoreGlobal.securityContext }} @@ -69,8 +68,17 @@ spec: - name: cloudsql-proxy image: {{ .Values.cloudsql.image.repository }}:{{ .Values.cloudsql.image.tag }} imagePullPolicy: {{ .Values.cloudsql.image.pullPolicy }} + {{- with .Values.anchoreGlobal.containerSecurityContext }} + securityContext: + {{ toYaml . 
| nindent 10 }} + {{- end }} command: ["/cloud_sql_proxy"] args: + {{- if .Values.cloudsql.extraArgs }} + {{- range $arg := .Values.cloudsql.extraArgs }} + - {{ quote $arg }} + {{- end }} + {{- end }} - "-instances={{ .Values.cloudsql.instance }}=tcp:5432" {{- if .Values.cloudsql.useExistingServiceAcc }} - "-credential_file=/var/{{ .Values.cloudsql.serviceAccSecretName }}/{{ .Values.cloudsql.serviceAccJsonName }}" @@ -83,15 +91,21 @@ spec: - name: "{{ .Chart.Name }}-{{ $component }}" image: {{ .Values.anchoreEnterpriseGlobal.image }} imagePullPolicy: {{ .Values.anchoreEnterpriseGlobal.imagePullPolicy }} - args: ["anchore-enterprise-manager", "service", "start", "--no-auto-upgrade", "rbac_manager"] + {{- with .Values.anchoreGlobal.containerSecurityContext }} + securityContext: + {{ toYaml . | nindent 10 }} + {{- end }} + command: ["/bin/sh", "-c"] + args: + - {{ print (include "doSourceFile" .) }} /docker-entrypoint.sh anchore-enterprise-manager service start --no-auto-upgrade rbac_manager ports: - containerPort: {{ .Values.anchoreEnterpriseRbac.service.managerPort }} name: rbac envFrom: {{- if not .Values.inject_secrets_via_env }} - {{- if .Values.anchoreGlobal.existingSecret }} + {{- if .Values.anchoreGlobal.useExistingSecrets }} - secretRef: - name: {{ .Values.anchoreGlobal.existingSecret }} + name: {{ .Values.anchoreGlobal.existingSecretName }} {{- else }} - secretRef: name: {{ include "anchore-engine.fullname" . }} @@ -99,8 +113,6 @@ spec: name: {{ print (include "anchore-engine.fullname" .) "-admin-pass" }} {{- end }} {{- end }} - - configMapRef: - name: {{ template "anchore-engine.fullname" . }}-env env: {{- with .Values.anchoreGlobal.extraEnv }} {{- toYaml . | nindent 8 }} @@ -132,6 +144,9 @@ spec: mountPath: /home/anchore/certs/ readOnly: true {{- end }} + {{- with .Values.anchoreGlobal.extraVolumeMounts }} + {{- toYaml . 
| nindent 8 }} + {{- end }} livenessProbe: httpGet: path: /health @@ -160,12 +175,18 @@ spec: - name: {{ .Chart.Name }}-rbac-authorizer image: {{ .Values.anchoreEnterpriseGlobal.image }} imagePullPolicy: {{ .Values.anchoreEnterpriseGlobal.imagePullPolicy }} - args: ["anchore-enterprise-manager", "service", "start", "--no-auto-upgrade", "rbac_authorizer"] + {{- with .Values.anchoreGlobal.containerSecurityContext }} + securityContext: + {{ toYaml . | nindent 10 }} + {{- end }} + command: ["/bin/sh", "-c"] + args: + - {{ print (include "doSourceFile" .) }} /docker-entrypoint.sh anchore-enterprise-manager service start --no-auto-upgrade rbac_authorizer envFrom: {{- if not .Values.inject_secrets_via_env }} - {{- if .Values.anchoreGlobal.existingSecret }} + {{- if .Values.anchoreGlobal.useExistingSecrets }} - secretRef: - name: {{ .Values.anchoreGlobal.existingSecret }} + name: {{ .Values.anchoreGlobal.existingSecretName }} {{- else }} - secretRef: name: {{ include "anchore-engine.fullname" . }} @@ -173,8 +194,6 @@ spec: name: {{ print (include "anchore-engine.fullname" .) "-admin-pass" }} {{- end }} {{- end }} - - configMapRef: - name: {{ template "anchore-engine.fullname" . }}-env env: {{- with .Values.anchoreGlobal.extraEnv }} {{- toYaml . | nindent 8 }} @@ -209,6 +228,9 @@ spec: - name: run mountPath: /var/run/anchore {{- end }} + {{- with .Values.anchoreGlobal.extraVolumeMounts }} + {{- toYaml . | nindent 8 }} + {{- end }} livenessProbe: exec: command: @@ -257,6 +279,9 @@ spec: secret: secretName: {{ . }} {{- end }} + {{- with .Values.anchoreGlobal.extraVolumes }} + {{- toYaml . | nindent 8 }} + {{- end }} {{- with .Values.anchoreEnterpriseRbac.nodeSelector }} nodeSelector: {{ toYaml . | nindent 8 }} @@ -269,7 +294,7 @@ spec: tolerations: {{ toYaml . | nindent 8 }} {{- end }} - {{- with .Values.anchoreGlobal.serviceAccountName }} + {{- with include "anchore-engine.enterprise-rbac.serviceAccountName" . }} serviceAccountName: {{ . 
}} {{- end }} @@ -277,7 +302,7 @@ spec: apiVersion: v1 kind: Service metadata: - name: {{ template "anchore-engine.enterprise-rbac.fullname" . }} + name: {{ template "anchore-engine.enterprise-rbac.serviceName" . }} labels: app: {{ template "anchore-engine.fullname" . }} chart: {{ .Chart.Name }}-{{ .Chart.Version }} @@ -301,6 +326,9 @@ spec: port: {{ .Values.anchoreEnterpriseRbac.service.managerPort }} targetPort: {{ .Values.anchoreEnterpriseRbac.service.managerPort }} protocol: TCP + {{- with .Values.anchoreEnterpriseRbac.service.nodePort }} + nodePort: {{ . }} + {{- end }} selector: app: {{ template "anchore-engine.fullname" . }} component: {{ $component }} diff --git a/stable/anchore-engine/templates/enterprise_reports_deployment.yaml b/stable/anchore-engine/templates/enterprise_reports_deployment.yaml index cdbbe713..77292dc3 100644 --- a/stable/anchore-engine/templates/enterprise_reports_deployment.yaml +++ b/stable/anchore-engine/templates/enterprise_reports_deployment.yaml @@ -55,7 +55,6 @@ spec: {{- if not .Values.inject_secrets_via_env }} checksum/secrets: {{ include (print $.Template.BasePath "/engine_secret.yaml") . | sha256sum }} {{- end }} - checksum/env: {{ include (print $.Template.BasePath "/engine_configmap_env.yaml") . | sha256sum }} checksum/config: {{ include (print $.Template.BasePath "/engine_configmap.yaml") . | sha256sum }} spec: {{- with .Values.anchoreGlobal.securityContext }} @@ -69,8 +68,17 @@ spec: - name: cloudsql-proxy image: {{ .Values.cloudsql.image.repository }}:{{ .Values.cloudsql.image.tag }} imagePullPolicy: {{ .Values.cloudsql.image.pullPolicy }} + {{- with .Values.anchoreGlobal.containerSecurityContext }} + securityContext: + {{ toYaml . 
| nindent 10 }} + {{- end }} command: ["/cloud_sql_proxy"] args: + {{- if .Values.cloudsql.extraArgs }} + {{- range $arg := .Values.cloudsql.extraArgs }} + - {{ quote $arg }} + {{- end }} + {{- end }} - "-instances={{ .Values.cloudsql.instance }}=tcp:5432" {{- if .Values.cloudsql.useExistingServiceAcc }} - "-credential_file=/var/{{ .Values.cloudsql.serviceAccSecretName }}/{{ .Values.cloudsql.serviceAccJsonName }}" @@ -83,15 +91,21 @@ spec: - name: "{{ .Chart.Name }}-{{ $component }}" image: {{ .Values.anchoreEnterpriseGlobal.image }} imagePullPolicy: {{ .Values.anchoreEnterpriseGlobal.imagePullPolicy }} - args: ["anchore-enterprise-manager", "service", "start", "--no-auto-upgrade", "reports_worker"] + {{- with .Values.anchoreGlobal.containerSecurityContext }} + securityContext: + {{ toYaml . | nindent 10 }} + {{- end }} + command: ["/bin/sh", "-c"] + args: + - {{ print (include "doSourceFile" .) }} /docker-entrypoint.sh anchore-enterprise-manager service start --no-auto-upgrade reports_worker ports: - containerPort: {{ .Values.anchoreEnterpriseReports.service.workerPort }} name: reports envFrom: {{- if not .Values.inject_secrets_via_env }} - {{- if .Values.anchoreGlobal.existingSecret }} + {{- if .Values.anchoreGlobal.useExistingSecrets }} - secretRef: - name: {{ .Values.anchoreGlobal.existingSecret }} + name: {{ .Values.anchoreGlobal.existingSecretName }} {{- else }} - secretRef: name: {{ include "anchore-engine.fullname" . }} @@ -99,8 +113,6 @@ spec: name: {{ print (include "anchore-engine.fullname" .) "-admin-pass" }} {{- end }} {{- end }} - - configMapRef: - name: {{ template "anchore-engine.fullname" . }}-env env: {{- with .Values.anchoreGlobal.extraEnv }} {{- toYaml . | nindent 8 }} @@ -132,6 +144,9 @@ spec: mountPath: /home/anchore/certs/ readOnly: true {{- end }} + {{- with .Values.anchoreGlobal.extraVolumeMounts }} + {{- toYaml . | nindent 8 }} + {{- end }} livenessProbe: httpGet: path: /health @@ -182,6 +197,9 @@ spec: secret: secretName: {{ . 
}} {{- end }} + {{- with .Values.anchoreGlobal.extraVolumes }} + {{- toYaml . | nindent 8 }} + {{- end }} {{- with .Values.anchoreEnterpriseReports.nodeSelector }} nodeSelector: {{ toYaml . | nindent 8 }} @@ -194,7 +212,7 @@ spec: tolerations: {{ toYaml . | nindent 8 }} {{- end }} - {{- with .Values.anchoreGlobal.serviceAccountName }} + {{- with include "anchore-engine.enterprise-reports.serviceAccountName" . }} serviceAccountName: {{ . }} {{- end }} @@ -202,7 +220,7 @@ spec: apiVersion: v1 kind: Service metadata: - name: {{ template "anchore-engine.enterprise-reports.fullname" . }} + name: {{ template "anchore-engine.enterprise-reports.serviceName" . }} labels: app: {{ template "anchore-engine.fullname" . }} chart: {{ .Chart.Name }}-{{ .Chart.Version }} @@ -226,7 +244,10 @@ spec: port: {{ .Values.anchoreEnterpriseReports.service.workerPort }} targetPort: {{ .Values.anchoreEnterpriseReports.service.workerPort }} protocol: TCP + {{- with .Values.anchoreEnterpriseReports.service.nodePort }} + nodePort: {{ . }} + {{- end }} selector: app: {{ template "anchore-engine.fullname" . }} component: {{ $component }} -{{- end -}} \ No newline at end of file +{{- end -}} diff --git a/stable/anchore-engine/templates/enterprise_ui_configmap.yaml b/stable/anchore-engine/templates/enterprise_ui_configmap.yaml index 2d9db880..10a5a106 100644 --- a/stable/anchore-engine/templates/enterprise_ui_configmap.yaml +++ b/stable/anchore-engine/templates/enterprise_ui_configmap.yaml @@ -17,31 +17,31 @@ metadata: data: config-ui.yaml: | {{- if .Values.anchoreGlobal.internalServicesSsl.enabled }} - engine_uri: 'https://{{ template "anchore-engine.api.fullname" . }}:{{ .Values.anchoreApi.service.port }}/v1' + engine_uri: 'https://{{ template "anchore-engine.api.serviceName" . }}:{{ .Values.anchoreApi.service.port }}/v1' {{- else }} - engine_uri: 'http://{{ template "anchore-engine.api.fullname" . 
}}:{{ .Values.anchoreApi.service.port }}/v1' + engine_uri: 'http://{{ template "anchore-engine.api.serviceName" . }}:{{ .Values.anchoreApi.service.port }}/v1' {{- end }} # This value is overridden by using the `ANCHORE_REDIS_URI` environment variable. # redis_ui: $ANCHORE_REDIS_URI {{- if .Values.anchoreEnterpriseRbac.enabled }} {{- if .Values.anchoreGlobal.internalServicesSsl.enabled }} - rbac_uri: 'https://{{ template "anchore-engine.enterprise-rbac.fullname" . }}:{{ .Values.anchoreEnterpriseRbac.service.managerPort }}/v1' + rbac_uri: 'https://{{ template "anchore-engine.enterprise-rbac.serviceName" . }}:{{ .Values.anchoreEnterpriseRbac.service.managerPort }}/v1' {{- else }} - rbac_uri: 'http://{{ template "anchore-engine.enterprise-rbac.fullname" . }}:{{ .Values.anchoreEnterpriseRbac.service.managerPort }}/v1' + rbac_uri: 'http://{{ template "anchore-engine.enterprise-rbac.serviceName" . }}:{{ .Values.anchoreEnterpriseRbac.service.managerPort }}/v1' {{- end }} {{- end }} {{- if .Values.anchoreEnterpriseReports.enabled }} {{- if .Values.anchoreGlobal.internalServicesSsl.enabled }} - reports_uri: 'https://{{ template "anchore-engine.api.fullname" . }}:{{ .Values.anchoreEnterpriseReports.service.apiPort}}/v1' + reports_uri: 'https://{{ template "anchore-engine.api.serviceName" . }}:{{ .Values.anchoreEnterpriseReports.service.apiPort}}/v1' {{- else}} - reports_uri: 'http://{{ template "anchore-engine.api.fullname" . }}:{{ .Values.anchoreEnterpriseReports.service.apiPort}}/v1' + reports_uri: 'http://{{ template "anchore-engine.api.serviceName" . }}:{{ .Values.anchoreEnterpriseReports.service.apiPort}}/v1' {{- end }} {{- end }} {{- if .Values.anchoreEnterpriseNotifications.enabled }} {{- if .Values.anchoreGlobal.internalServicesSsl.enabled }} - notifications_uri: 'https://{{ template "anchore-engine.enterprise-notifications.fullname" . 
}}:{{ .Values.anchoreEnterpriseNotifications.service.port}}/v1' + notifications_uri: 'https://{{ template "anchore-engine.enterprise-notifications.serviceName" . }}:{{ .Values.anchoreEnterpriseNotifications.service.port}}/v1' {{- else}} - notifications_uri: 'http://{{ template "anchore-engine.enterprise-notifications.fullname" . }}:{{ .Values.anchoreEnterpriseNotifications.service.port}}/v1' + notifications_uri: 'http://{{ template "anchore-engine.enterprise-notifications.serviceName" . }}:{{ .Values.anchoreEnterpriseNotifications.service.port}}/v1' {{- end }} {{- end }} # This value is overridden by using the `ANCHORE_APPDB_URI` environment variable. @@ -53,7 +53,7 @@ data: allow_shared_login: {{ .Values.anchoreEnterpriseUi.enableSharedLogin }} redis_flushdb: {{ .Values.anchoreEnterpriseUi.redisFlushdb }} force_websocket: {{ .Values.anchoreEnterpriseUi.forceWebsocket }} - authentication_lock: + authentication_lock: count: {{ .Values.anchoreEnterpriseUi.authenticationLock.count }} expires: {{ .Values.anchoreEnterpriseUi.authenticationLock.expires }} {{- with .Values.anchoreEnterpriseUi.customLinks }} @@ -72,4 +72,7 @@ data: admin: {{ .admin }} standard: {{ .standard }} {{- end }} + log_level: {{ .Values.anchoreEnterpriseUi.logLevel | squote }} + enrich_inventory_view: {{ .Values.anchoreEnterpriseUi.enrichInventoryView }} + enable_prometheus_metrics: {{ .Values.anchoreGlobal.enableMetrics }} {{- end -}} diff --git a/stable/anchore-engine/templates/enterprise_ui_deployment.yaml b/stable/anchore-engine/templates/enterprise_ui_deployment.yaml index 3704697c..7c5d8b31 100644 --- a/stable/anchore-engine/templates/enterprise_ui_deployment.yaml +++ b/stable/anchore-engine/templates/enterprise_ui_deployment.yaml @@ -56,7 +56,7 @@ spec: {{- with .Values.anchoreEnterpriseUi.annotations }} {{ toYaml . 
| nindent 8 }} {{- end }} - {{- if not (or .Values.inject_secrets_via_env .Values.anchoreEnterpriseUi.existingSecret) }} + {{- if not (or .Values.inject_secrets_via_env .Values.anchoreGlobal.useExistingSecrets) }} checksum/secrets: {{ include (print $.Template.BasePath "/enterprise_ui_secret.yaml") . | sha256sum }} {{- end }} checksum/ui-config: {{ include (print $.Template.BasePath "/enterprise_ui_configmap.yaml") . | sha256sum }} @@ -72,8 +72,17 @@ spec: - name: cloudsql-proxy image: {{ .Values.cloudsql.image.repository }}:{{ .Values.cloudsql.image.tag }} imagePullPolicy: {{ .Values.cloudsql.image.pullPolicy }} + {{- with .Values.anchoreGlobal.containerSecurityContext }} + securityContext: + {{ toYaml . | nindent 10 }} + {{- end }} command: ["/cloud_sql_proxy"] args: + {{- if .Values.cloudsql.extraArgs }} + {{- range $arg := .Values.cloudsql.extraArgs }} + - {{ quote $arg }} + {{- end }} + {{- end }} - "-instances={{ .Values.cloudsql.instance }}=tcp:5432" {{- if .Values.cloudsql.useExistingServiceAcc }} - "-credential_file=/var/{{ .Values.cloudsql.serviceAccSecretName }}/{{ .Values.cloudsql.serviceAccJsonName }}" @@ -86,11 +95,14 @@ spec: - name: "{{ .Chart.Name }}-{{ $component }}" image: {{ .Values.anchoreEnterpriseUi.image }} imagePullPolicy: {{ .Values.anchoreEnterpriseUi.imagePullPolicy }} - env: - {{- if and (index .Values "anchoreEnterpriseUi" "existingSecret") (not (index .Values "anchore-ui-redis" "externalEndpoint")) }} - - name: ANCHORE_REDIS_URI - value: redis://nouser:{{ index .Values "anchore-ui-redis" "password" }}@{{ template "redis.fullname" . }}-master:6379 + {{- with .Values.anchoreGlobal.containerSecurityContext }} + securityContext: + {{ toYaml . | nindent 10 }} {{- end }} + command: ["/bin/sh", "-c"] + args: + - {{ print (include "doSourceFile" .) 
}} /docker-entrypoint.sh node /home/node/aui/build/server.js + env: {{ if .Values.anchoreGlobal.dbConfig.ssl }} - name: PGSSLROOTCERT value: /home/anchore/certs/{{ .Values.anchoreGlobal.dbConfig.sslRootCertName }} @@ -107,14 +119,12 @@ spec: {{- end }} envFrom: {{- if not .Values.inject_secrets_via_env }} - {{- if .Values.anchoreEnterpriseUi.existingSecret }} + {{- if .Values.anchoreGlobal.useExistingSecrets }} - secretRef: - name: {{ .Values.anchoreEnterpriseUi.existingSecret }} + name: {{ .Values.anchoreEnterpriseUi.existingSecretName }} {{- else }} - secretRef: name: {{ include "anchore-engine.enterprise-ui.fullname" . }} - - secretRef: - name: {{ print (include "anchore-engine.fullname" .) "-admin-pass" }} {{- end }} {{- end }} ports: @@ -133,6 +143,9 @@ spec: mountPath: /home/anchore/certs/ readOnly: true {{- end }} + {{- with .Values.anchoreGlobal.extraVolumeMounts }} + {{- toYaml . | nindent 8 }} + {{- end }} livenessProbe: tcpSocket: port: enterprise-ui @@ -168,6 +181,9 @@ spec: secret: secretName: {{ .Values.cloudsql.serviceAccSecretName }} {{- end }} + {{- with .Values.anchoreGlobal.extraVolumes }} + {{- toYaml . | nindent 6 }} + {{- end }} {{- with .Values.anchoreEnterpriseUi.nodeSelector }} nodeSelector: {{ toYaml . | nindent 8 }} @@ -180,15 +196,15 @@ spec: tolerations: {{ toYaml . | nindent 8 }} {{- end }} - {{- with .Values.anchoreGlobal.serviceAccountName }} + {{- with include "anchore-engine.enterprise-ui.serviceAccountName" . }} serviceAccountName: {{ . }} - {{- end }} + {{- end }} --- apiVersion: v1 kind: Service metadata: - name: {{ include "anchore-engine.enterprise-ui.fullname" . | quote }} + name: {{ include "anchore-engine.enterprise-ui.serviceName" . | quote }} labels: app: {{ template "anchore-engine.fullname" . }} component: {{ $component }} @@ -213,6 +229,9 @@ spec: port: {{ .Values.anchoreEnterpriseUi.service.port }} protocol: TCP targetPort: 3000 + {{- with .Values.anchoreEnterpriseUi.service.nodePort }} + nodePort: {{ . 
}} + {{- end }} selector: app: {{ template "anchore-engine.fullname" . }} component: {{ $component }} diff --git a/stable/anchore-engine/templates/enterprise_ui_secret.yaml b/stable/anchore-engine/templates/enterprise_ui_secret.yaml index 90df31cf..e13e7688 100644 --- a/stable/anchore-engine/templates/enterprise_ui_secret.yaml +++ b/stable/anchore-engine/templates/enterprise_ui_secret.yaml @@ -1,4 +1,5 @@ -{{- if not .Values.anchoreEnterpriseUi.existingSecret }} +{{- template "checkUpgradeForExistingSecret" . }} +{{- if not .Values.anchoreGlobal.useExistingSecrets }} {{- if and .Values.anchoreEnterpriseGlobal.enabled .Values.anchoreEnterpriseUi.enabled }} apiVersion: v1 kind: Secret @@ -15,15 +16,14 @@ metadata: type: Opaque stringData: {{- if .Values.anchoreGlobal.dbConfig.ssl }} - ANCHORE_APPDB_URI: 'postgresql://{{ index .Values "postgresql" "postgresUser" }}:{{ index .Values "postgresql" "postgresPassword" }}@{{ template "db-hostname" . }}/{{ index .Values "postgresql" "postgresDatabase" }}?ssl=verify-full' + ANCHORE_APPDB_URI: 'postgresql://{{ ternary (index .Values "anchoreEnterpriseUi" "dbUser") (index .Values "postgresql" "postgresUser") (hasKey .Values.anchoreEnterpriseUi "dbUser" ) }}:{{ ternary (index .Values "anchoreEnterpriseUi" "dbPass") (index .Values "postgresql" "postgresPassword") (hasKey .Values.anchoreEnterpriseUi "dbPass" ) }}@{{ template "db-hostname" . }}/{{ index .Values "postgresql" "postgresDatabase" }}?ssl=verify-full' {{- else }} - ANCHORE_APPDB_URI: 'postgresql://{{ index .Values "postgresql" "postgresUser" }}:{{ index .Values "postgresql" "postgresPassword" }}@{{ template "db-hostname" . 
}}/{{ index .Values "postgresql" "postgresDatabase" }}' + ANCHORE_APPDB_URI: 'postgresql://{{ ternary (index .Values "anchoreEnterpriseUi" "dbUser") (index .Values "postgresql" "postgresUser") (hasKey .Values.anchoreEnterpriseUi "dbUser" ) }}:{{ ternary (index .Values "anchoreEnterpriseUi" "dbPass") (index .Values "postgresql" "postgresPassword") (hasKey .Values.anchoreEnterpriseUi "dbPass" ) }}@{{ template "db-hostname" . }}/{{ index .Values "postgresql" "postgresDatabase" }}' {{- end }} - - {{- if and (index .Values "anchore-ui-redis" "externalEndpoint") (not (index .Values "anchore-ui-redis" "enabled")) }} - ANCHORE_REDIS_URI: '{{ index .Values "anchore-ui-redis" "externalEndpoint" }}' + {{- if and (index .Values "ui-redis" "externalEndpoint") (not (index .Values "ui-redis" "enabled")) }} + ANCHORE_REDIS_URI: '{{ index .Values "ui-redis" "externalEndpoint" }}' {{- else }} - ANCHORE_REDIS_URI: 'redis://nouser:{{ index .Values "anchore-ui-redis" "password" }}@{{ template "redis.fullname" . }}-master:6379' + ANCHORE_REDIS_URI: 'redis://nouser:{{ index .Values "ui-redis" "auth" "password" }}@{{ template "redis.fullname" . }}-master:6379' {{- end }} {{- end }} {{- end }} diff --git a/stable/anchore-engine/templates/enterprise_upgrade_job.yaml b/stable/anchore-engine/templates/enterprise_upgrade_job.yaml index 6907a83e..b1c48dc1 100644 --- a/stable/anchore-engine/templates/enterprise_upgrade_job.yaml +++ b/stable/anchore-engine/templates/enterprise_upgrade_job.yaml @@ -1,4 +1,5 @@ -{{- if and .Values.anchoreEnterpriseGlobal.enabled .Values.anchoreEnterpriseEngineUpgradeJob.enabled }} +{{- template "checkUpgradeCompatibility" . 
}} +{{- if and .Values.anchoreEnterpriseGlobal.enabled .Values.anchoreEnterpriseEngineUpgradeJob.enabled (not .Values.anchoreGlobal.usePreupgradeHook) }} apiVersion: batch/v1 kind: Job metadata: @@ -8,6 +9,12 @@ metadata: app.kubernetes.io/instance: {{ .Release.Name | quote }} app.kubernetes.io/version: {{ .Chart.AppVersion }} helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + {{- with .Values.anchoreEnterpriseEngineUpgradeJob.labels }} + {{ toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.anchoreGlobal.labels }} + {{ toYaml . | nindent 4 }} + {{- end }} annotations: "helm.sh/hook": post-upgrade "helm.sh/hook-weight": "-3" @@ -19,6 +26,14 @@ spec: app.kubernetes.io/managed-by: {{ .Release.Service | quote }} app.kubernetes.io/instance: {{ .Release.Name | quote }} helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app: {{ template "anchore-engine.fullname" . }} + component: anchore-enterprise-upgrade + {{- with .Values.anchoreEnterpriseEngineUpgradeJob.labels }} + {{ toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.anchoreGlobal.labels }} + {{ toYaml . | nindent 8 }} + {{- end }} annotations: {{- with .Values.anchoreGlobal.annotations }} {{ toYaml . | nindent 8 }} @@ -42,8 +57,17 @@ spec: - name: cloudsql-proxy image: {{ .Values.cloudsql.image.repository }}:{{ .Values.cloudsql.image.tag }} imagePullPolicy: {{ .Values.cloudsql.image.pullPolicy }} + {{- with .Values.anchoreGlobal.containerSecurityContext }} + securityContext: + {{ toYaml . 
| nindent 10 }} + {{- end }} command: ["/cloud_sql_proxy"] args: + {{- if .Values.cloudsql.extraArgs }} + {{- range $arg := .Values.cloudsql.extraArgs }} + - {{ quote $arg }} + {{- end }} + {{- end }} - "-instances={{ .Values.cloudsql.instance }}=tcp:5432" {{- if .Values.cloudsql.useExistingServiceAcc }} - "-credential_file=/var/{{ .Values.cloudsql.serviceAccSecretName }}/{{ .Values.cloudsql.serviceAccJsonName }}" @@ -60,26 +84,34 @@ spec: args: {{- if not .Values.anchoreGlobal.dbConfig.ssl }} - | - anchore-enterprise-manager db --db-connect postgresql://"${ANCHORE_DB_USER}":"${ANCHORE_DB_PASSWORD}"@"${ANCHORE_DB_HOST}"/"${ANCHORE_DB_NAME}" upgrade --dontask; + {{ print (include "doSourceFile" .) }} anchore-enterprise-manager db --db-connect postgresql://"${ANCHORE_DB_USER}":"${ANCHORE_DB_PASSWORD}"@"${ANCHORE_DB_HOST/:5432}:${ANCHORE_DB_PORT:-5432}"/"${ANCHORE_DB_NAME}" upgrade --dontask; anchore-enterprise-manager db --db-connect postgresql://"${ANCHORE_DB_USER}":"${ANCHORE_DB_PASSWORD}"@"${ANCHORE_DB_HOST/:5432}:${ANCHORE_DB_PORT:-5432}"/"${ANCHORE_DB_NAME}" pre-upgrade-check; {{- else if eq .Values.anchoreGlobal.dbConfig.sslMode "require" }} - | - anchore-enterprise-manager db --db-use-ssl --db-connect postgresql://"${ANCHORE_DB_USER}":"${ANCHORE_DB_PASSWORD}"@"${ANCHORE_DB_HOST}"/"${ANCHORE_DB_NAME}"?sslmode={{- .Values.anchoreGlobal.dbConfig.sslMode }} upgrade --dontask; + {{ print (include "doSourceFile" .) 
}} anchore-enterprise-manager db --db-use-ssl --db-connect postgresql://"${ANCHORE_DB_USER}":"${ANCHORE_DB_PASSWORD}"@"${ANCHORE_DB_HOST/:5432}:${ANCHORE_DB_PORT:-5432}"/"${ANCHORE_DB_NAME}"?sslmode={{- .Values.anchoreGlobal.dbConfig.sslMode }} upgrade --dontask; anchore-enterprise-manager db --db-use-ssl --db-connect postgresql://"${ANCHORE_DB_USER}":"${ANCHORE_DB_PASSWORD}"@"${ANCHORE_DB_HOST/:5432}:${ANCHORE_DB_PORT:-5432}"/"${ANCHORE_DB_NAME}"?sslmode={{- .Values.anchoreGlobal.dbConfig.sslMode }} pre-upgrade-check; {{- else }} - | - anchore-enterprise-manager db --db-use-ssl --db-connect postgresql://"${ANCHORE_DB_USER}":"${ANCHORE_DB_PASSWORD}"@"${ANCHORE_DB_HOST}"/"${ANCHORE_DB_NAME}"?sslmode={{- .Values.anchoreGlobal.dbConfig.sslMode -}}\&sslrootcert=/home/anchore/certs/{{- .Values.anchoreGlobal.dbConfig.sslRootCertName }} upgrade --dontask; + {{ print (include "doSourceFile" .) }} anchore-enterprise-manager db --db-use-ssl --db-connect postgresql://"${ANCHORE_DB_USER}":"${ANCHORE_DB_PASSWORD}"@"${ANCHORE_DB_HOST/:5432}:${ANCHORE_DB_PORT:-5432}"/"${ANCHORE_DB_NAME}"?sslmode={{- .Values.anchoreGlobal.dbConfig.sslMode -}}\&sslrootcert=/home/anchore/certs/{{- .Values.anchoreGlobal.dbConfig.sslRootCertName }} upgrade --dontask; anchore-enterprise-manager db --db-use-ssl --db-connect postgresql://"${ANCHORE_DB_USER}":"${ANCHORE_DB_PASSWORD}"@"${ANCHORE_DB_HOST/:5432}:${ANCHORE_DB_PORT:-5432}"/"${ANCHORE_DB_NAME}"?sslmode={{- .Values.anchoreGlobal.dbConfig.sslMode -}}\&sslrootcert=/home/anchore/certs/{{- .Values.anchoreGlobal.dbConfig.sslRootCertName }} pre-upgrade-check; {{- end }} {{- if .Values.cloudsql.enabled }} - sql_proxy_pid=$(pgrep cloud_sql_proxy) && kill -INT $sql_proxy_pid; + - sql_proxy_pid=$(pgrep cloud_sql_proxy) && kill -INT $sql_proxy_pid; securityContext: + {{- with .Values.anchoreGlobal.containerSecurityContext }} + {{ toYaml . 
| nindent 10 }} + {{- end }} capabilities: add: - SYS_PTRACE + {{- else }} + {{- with .Values.anchoreGlobal.containerSecurityContext }} + securityContext: + {{ toYaml . | nindent 10 }} + {{- end }} {{- end }} envFrom: {{- if not .Values.inject_secrets_via_env }} - {{- if .Values.anchoreGlobal.existingSecret }} + {{- if .Values.anchoreGlobal.useExistingSecrets }} - secretRef: - name: {{ .Values.anchoreGlobal.existingSecret }} + name: {{ .Values.anchoreGlobal.existingSecretName }} {{- else }} - secretRef: name: {{ include "anchore-engine.fullname" . }} @@ -87,21 +119,22 @@ spec: name: {{ print (include "anchore-engine.fullname" .) "-admin-pass" }} {{- end }} {{- end }} - - configMapRef: - name: {{ template "anchore-engine.fullname" . }}-env env: {{- with .Values.anchoreGlobal.extraEnv }} {{- toYaml . | nindent 8 }} {{- end }} - {{- if (.Values.anchoreGlobal.certStoreSecretName) }} volumeMounts: + {{- if (.Values.anchoreGlobal.certStoreSecretName) }} - name: certs mountPath: /home/anchore/certs/ readOnly: true {{- end }} + {{- with .Values.anchoreGlobal.extraVolumeMounts }} + {{- toYaml . | nindent 8 }} + {{- end }} resources: {{ toYaml .Values.anchoreEnterpriseEngineUpgradeJob.resources | nindent 10 }} - {{- if or .Values.anchoreGlobal.certStoreSecretName .Values.cloudsql.useExistingServiceAcc }} + {{- if or .Values.anchoreGlobal.certStoreSecretName .Values.cloudsql.useExistingServiceAcc .Values.anchoreGlobal.extraVolumeMounts }} volumes: {{- with .Values.anchoreGlobal.certStoreSecretName }} - name: certs @@ -113,6 +146,9 @@ spec: secret: secretName: {{ .Values.cloudsql.serviceAccSecretName }} {{- end }} + {{- with .Values.anchoreGlobal.extraVolumes }} + {{- toYaml . | nindent 6 }} + {{- end }} {{- end }} {{- with .Values.anchoreEnterpriseEngineUpgradeJob.nodeSelector }} nodeSelector: @@ -129,4 +165,4 @@ spec: {{- with .Values.anchoreGlobal.serviceAccountName }} serviceAccountName: {{ . 
}} {{- end }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/stable/anchore-engine/templates/hooks/pre-upgrade/0-db-upgrade.yaml b/stable/anchore-engine/templates/hooks/pre-upgrade/0-db-upgrade.yaml new file mode 100644 index 00000000..ddfac108 --- /dev/null +++ b/stable/anchore-engine/templates/hooks/pre-upgrade/0-db-upgrade.yaml @@ -0,0 +1,284 @@ +{{- if and .Values.anchoreGlobal.usePreupgradeHook .Values.anchoreEnterpriseGlobal.enabled -}} +apiVersion: batch/v1 +kind: Job +metadata: + name: "{{ .Release.Name }}-pre-upgrade-job" + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + {{- with .Values.anchoreGlobal.labels }} + {{ toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.anchoreEngineUpgradeJob.labels }} + {{ toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.anchoreEnterpriseEngineUpgradeJob.labels }} + {{ toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.anchoreEnterpriseFeedsUpgradeJob.labels }} + {{ toYaml . | nindent 4 }} + {{- end }} + annotations: + "helm.sh/hook": pre-upgrade + "helm.sh/hook-weight": "2" +spec: + template: + metadata: + name: "{{ .Release.Name }}-pre-upgrade" + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app: {{ template "anchore-engine.fullname" . }} + component: anchore-enterprise-upgrade + {{- with .Values.anchoreGlobal.labels }} + {{ toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.anchoreEngineUpgradeJob.labels }} + {{ toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.anchoreEnterpriseEngineUpgradeJob.labels }} + {{ toYaml . 
| nindent 8 }} + {{- end }} + {{- with .Values.anchoreEnterpriseFeedsUpgradeJob.labels }} + {{ toYaml . | nindent 8 }} + {{- end }} + annotations: + {{- with .Values.anchoreGlobal.annotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.anchoreEngineUpgradeJob.annotations }} + {{ toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.anchoreEnterpriseEngineUpgradeJob.annotations }} + {{ toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.anchoreEnterpriseFeedsUpgradeJob.annotations }} + {{ toYaml . | nindent 8 }} + {{- end }} + spec: + {{- with .Values.anchoreGlobal.securityContext }} + securityContext: + {{ toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.anchoreEnterpriseGlobal.enabled }} + imagePullSecrets: + - name: {{ .Values.anchoreEnterpriseGlobal.imagePullSecretName }} + {{- else }} + {{- with .Values.anchoreGlobal.imagePullSecretName }} + imagePullSecrets: + - name: {{ . }} + {{- end }} + {{- end }} + restartPolicy: Never + serviceAccountName: {{ template "anchore-engine.fullname" . }}-upgrade-sa + {{- with .Values.anchoreEnterpriseEngineUpgradeJob.nodeSelector }} + nodeSelector: + {{ toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.anchoreEnterpriseEngineUpgradeJob.affinity }} + affinity: + {{ toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.anchoreEnterpriseEngineUpgradeJob.tolerations }} + tolerations: + {{ toYaml . | nindent 8 }} + {{- end }} + {{- if or .Values.anchoreGlobal.certStoreSecretName .Values.cloudsql.useExistingServiceAcc .Values.anchoreGlobal.extraVolumeMounts }} + volumes: + {{- with .Values.anchoreGlobal.certStoreSecretName }} + - name: certs + secret: + secretName: {{ . }} + {{- end }} + {{- if .Values.cloudsql.useExistingServiceAcc }} + - name: {{ .Values.cloudsql.serviceAccSecretName }} + secret: + secretName: {{ .Values.cloudsql.serviceAccSecretName }} + {{- end }} + {{- with .Values.anchoreGlobal.extraVolumes }} + {{- toYaml . 
| nindent 6 }} + {{- end }} + {{- end }} + initContainers: + - name: scale-down-anchore + image: bitnami/kubectl:1.27 + command: ["kubectl", "scale", "deployments", "--all", "--replicas=0", "-l", "app={{ template "anchore-engine.fullname" . }}"] + {{- if .Values.anchoreGlobal.containerSecurityContext }} + securityContext: + {{- with .Values.anchoreGlobal.containerSecurityContext }} + {{ toYaml . | nindent 12 }} + {{- end }} + {{- end }} + resources: + {{ toYaml .Values.anchoreEnterpriseEngineUpgradeJob.resources | nindent 12 }} + - name: confirm-service-scaled-down + image: bitnami/kubectl:1.27 + command: ["/bin/bash", "-c"] + args: ["while [[ $(kubectl get pods -l app={{ template "anchore-engine.fullname" . }} --field-selector=status.phase=Running --no-headers | wc -l) -gt 0 ]]; do echo 'waiting for pods to go down' && sleep 5; done"] + {{- if .Values.anchoreGlobal.containerSecurityContext }} + securityContext: + {{- with .Values.anchoreGlobal.containerSecurityContext }} + {{ toYaml . | nindent 12 }} + {{- end }} + {{- end }} + resources: + {{ toYaml .Values.anchoreEnterpriseEngineUpgradeJob.resources | nindent 12 }} + + containers: + {{- if .Values.cloudsql.enabled }} + - name: cloudsql-proxy + image: {{ .Values.cloudsql.image.repository }}:{{ .Values.cloudsql.image.tag }} + imagePullPolicy: {{ .Values.cloudsql.image.pullPolicy }} + {{- with .Values.anchoreGlobal.containerSecurityContext }} + securityContext: + {{ toYaml . 
| nindent 12 }} + {{- end }} + command: ["/cloud_sql_proxy"] + args: + {{- if .Values.cloudsql.extraArgs }} + {{- range $arg := .Values.cloudsql.extraArgs }} + - {{ quote $arg }} + {{- end }} + {{- end }} + - "-instances={{ .Values.cloudsql.instance }}=tcp:5432" + {{- if .Values.cloudsql.useExistingServiceAcc }} + - "-credential_file=/var/{{ .Values.cloudsql.serviceAccSecretName }}/{{ .Values.cloudsql.serviceAccJsonName }}" + volumeMounts: + - mountPath: /var/{{ .Values.cloudsql.serviceAccSecretName }} + name: {{ .Values.cloudsql.serviceAccSecretName }} + readOnly: true + {{- end }} + {{- end }} + - name: upgrade-enterprise-db + image: {{ .Values.anchoreEnterpriseGlobal.image }} + imagePullPolicy: Always + {{- if or .Values.anchoreGlobal.containerSecurityContext .Values.cloudsql.enabled }} + securityContext: + {{- with .Values.anchoreGlobal.containerSecurityContext }} + {{ toYaml . | nindent 12 }} + {{- end }} + {{- if .Values.cloudsql.enabled }} + capabilities: + add: + - SYS_PTRACE + {{- end }} + {{- end }} + envFrom: + {{- if not .Values.inject_secrets_via_env }} + {{- if .Values.anchoreGlobal.useExistingSecrets }} + - secretRef: + name: {{ .Values.anchoreGlobal.existingSecretName }} + {{- else }} + - secretRef: + name: {{ include "anchore-engine.fullname" . }} + - secretRef: + name: {{ print (include "anchore-engine.fullname" .) "-admin-pass" }} + {{- end }} + {{- end }} + env: + {{- with .Values.anchoreGlobal.extraEnv }} + {{- toYaml . | nindent 12 }} + {{- end }} + volumeMounts: + {{- if (.Values.anchoreGlobal.certStoreSecretName) }} + - name: certs + mountPath: /home/anchore/certs/ + readOnly: true + {{- end }} + {{- with .Values.anchoreGlobal.extraVolumeMounts }} + {{- toYaml . | nindent 12 }} + {{- end }} + resources: + {{ toYaml .Values.anchoreEnterpriseEngineUpgradeJob.resources | nindent 12 }} + command: ["/bin/bash", "-c"] + args: + {{- if not .Values.anchoreGlobal.dbConfig.ssl }} + - | + {{ print (include "doSourceFile" .) 
}} anchore-manager db --db-connect postgresql://"${ANCHORE_DB_USER}":"${ANCHORE_DB_PASSWORD}"@"${ANCHORE_DB_HOST/:5432}:${ANCHORE_DB_PORT:-5432}"/"${ANCHORE_DB_NAME}" upgrade --dontask; + {{ print (include "doSourceFile" .) }} anchore-enterprise-manager db --db-connect postgresql://"${ANCHORE_DB_USER}":"${ANCHORE_DB_PASSWORD}"@"${ANCHORE_DB_HOST/:5432}:${ANCHORE_DB_PORT:-5432}"/"${ANCHORE_DB_NAME}" upgrade --dontask; + {{ print (include "doSourceFile" .) }} anchore-enterprise-manager db --db-connect postgresql://"${ANCHORE_DB_USER}":"${ANCHORE_DB_PASSWORD}"@"${ANCHORE_DB_HOST/:5432}:${ANCHORE_DB_PORT:-5432}"/"${ANCHORE_DB_NAME}" pre-upgrade-check; + {{- else if eq .Values.anchoreGlobal.dbConfig.sslMode "require"}} + - | + {{ print (include "doSourceFile" .) }} anchore-manager db --db-use-ssl --db-connect postgresql://"${ANCHORE_DB_USER}":"${ANCHORE_DB_PASSWORD}"@"${ANCHORE_DB_HOST/:5432}:${ANCHORE_DB_PORT:-5432}"/"${ANCHORE_DB_NAME}"?sslmode={{- .Values.anchoreGlobal.dbConfig.sslMode }} upgrade --dontask; + {{ print (include "doSourceFile" .) }} anchore-enterprise-manager db --db-use-ssl --db-connect postgresql://"${ANCHORE_DB_USER}":"${ANCHORE_DB_PASSWORD}"@"${ANCHORE_DB_HOST/:5432}:${ANCHORE_DB_PORT:-5432}"/"${ANCHORE_DB_NAME}"?sslmode={{- .Values.anchoreGlobal.dbConfig.sslMode }} upgrade --dontask; + {{ print (include "doSourceFile" .) }} anchore-enterprise-manager db --db-use-ssl --db-connect postgresql://"${ANCHORE_DB_USER}":"${ANCHORE_DB_PASSWORD}"@"${ANCHORE_DB_HOST/:5432}:${ANCHORE_DB_PORT:-5432}"/"${ANCHORE_DB_NAME}"?sslmode={{- .Values.anchoreGlobal.dbConfig.sslMode }} pre-upgrade-check; + {{- else }} + - | + {{ print (include "doSourceFile" .) 
}} anchore-manager db --db-use-ssl --db-connect postgresql://"${ANCHORE_DB_USER}":"${ANCHORE_DB_PASSWORD}"@"${ANCHORE_DB_HOST/:5432}:${ANCHORE_DB_PORT:-5432}"/"${ANCHORE_DB_NAME}"?sslmode={{- .Values.anchoreGlobal.dbConfig.sslMode -}}\&sslrootcert=/home/anchore/certs/{{- .Values.anchoreGlobal.dbConfig.sslRootCertName }} upgrade --dontask; + {{ print (include "doSourceFile" .) }} anchore-enterprise-manager db --db-use-ssl --db-connect postgresql://"${ANCHORE_DB_USER}":"${ANCHORE_DB_PASSWORD}"@"${ANCHORE_DB_HOST/:5432}:${ANCHORE_DB_PORT:-5432}"/"${ANCHORE_DB_NAME}"?sslmode={{- .Values.anchoreGlobal.dbConfig.sslMode -}}\&sslrootcert=/home/anchore/certs/{{- .Values.anchoreGlobal.dbConfig.sslRootCertName }} upgrade --dontask; + {{ print (include "doSourceFile" .) }} anchore-enterprise-manager db --db-use-ssl --db-connect postgresql://"${ANCHORE_DB_USER}":"${ANCHORE_DB_PASSWORD}"@"${ANCHORE_DB_HOST/:5432}:${ANCHORE_DB_PORT:-5432}"/"${ANCHORE_DB_NAME}"?sslmode={{- .Values.anchoreGlobal.dbConfig.sslMode -}}\&sslrootcert=/home/anchore/certs/{{- .Values.anchoreGlobal.dbConfig.sslRootCertName }} pre-upgrade-check; + {{- end }} + {{- if .Values.cloudsql.enabled }} + - | + sql_proxy_pid=$(pgrep cloud_sql_proxy) && kill -INT $sql_proxy_pid; + {{- end }} + + - name: upgrade-feeds-db + image: {{ .Values.anchoreEnterpriseGlobal.image }} + imagePullPolicy: Always + {{- if or .Values.anchoreGlobal.containerSecurityContext .Values.cloudsql.enabled }} + securityContext: + {{- with .Values.anchoreGlobal.containerSecurityContext }} + {{ toYaml . | nindent 12 }} + {{- end }} + {{- if .Values.cloudsql.enabled }} + capabilities: + add: + - SYS_PTRACE + {{- end }} + {{- end }} + envFrom: + {{- if not .Values.inject_secrets_via_env }} + {{- if .Values.anchoreGlobal.useExistingSecrets }} + - secretRef: + name: {{ .Values.anchoreEnterpriseFeeds.existingSecretName }} + {{- else }} + - secretRef: + name: {{ include "anchore-engine.enterprise-feeds.fullname" . 
}} + - secretRef: + name: {{ print (include "anchore-engine.fullname" .) "-admin-pass" }} + {{- end }} + + {{- end }} + env: + {{- with .Values.anchoreGlobal.extraEnv }} + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.anchoreEnterpriseFeeds.extraEnv }} + {{- toYaml . | nindent 12 }} + {{- end }} + volumeMounts: + {{- if (.Values.anchoreGlobal.certStoreSecretName) }} + - name: certs + mountPath: /home/anchore/certs/ + readOnly: true + {{- end }} + {{- with .Values.anchoreGlobal.extraVolumeMounts }} + {{- toYaml . | nindent 12 }} + {{- end }} + resources: + {{ toYaml .Values.anchoreEnterpriseFeedsUpgradeJob.resources | nindent 12 }} + command: ["/bin/bash", "-c"] + args: + {{- if not .Values.anchoreEnterpriseFeeds.dbConfig.ssl }} + - | + {{ print (include "doSourceFile" .) }} anchore-enterprise-manager db --db-connect postgresql://"${ANCHORE_FEEDS_DB_USER}":"${ANCHORE_FEEDS_DB_PASSWORD}"@"${ANCHORE_FEEDS_DB_HOST/:5432}:${ANCHORE_FEEDS_DB_PORT:-5432}"/"${ANCHORE_FEEDS_DB_NAME}" upgrade --dontask; + {{ print (include "doSourceFile" .) }} anchore-enterprise-manager db --db-connect postgresql://"${ANCHORE_FEEDS_DB_USER}":"${ANCHORE_FEEDS_DB_PASSWORD}"@"${ANCHORE_FEEDS_DB_HOST/:5432}:${ANCHORE_FEEDS_DB_PORT:-5432}"/"${ANCHORE_FEEDS_DB_NAME}" pre-upgrade-check; + {{- else if eq .Values.anchoreEnterpriseFeeds.dbConfig.sslMode "require" }} + - | + {{ print (include "doSourceFile" .) }} anchore-enterprise-manager db --db-use-ssl --db-connect postgresql://"${ANCHORE_FEEDS_DB_USER}":"${ANCHORE_FEEDS_DB_PASSWORD}"@"${ANCHORE_FEEDS_DB_HOST/:5432}:${ANCHORE_FEEDS_DB_PORT:-5432}"/"${ANCHORE_FEEDS_DB_NAME}"?sslmode={{- .Values.anchoreEnterpriseFeeds.dbConfig.sslMode }} upgrade --dontask; + {{ print (include "doSourceFile" .) 
}} anchore-enterprise-manager db --db-use-ssl --db-connect postgresql://"${ANCHORE_FEEDS_DB_USER}":"${ANCHORE_FEEDS_DB_PASSWORD}"@"${ANCHORE_FEEDS_DB_HOST/:5432}:${ANCHORE_FEEDS_DB_PORT:-5432}"/"${ANCHORE_FEEDS_DB_NAME}"?sslmode={{- .Values.anchoreEnterpriseFeeds.dbConfig.sslMode }} pre-upgrade-check; + {{- else }} + - | + {{ print (include "doSourceFile" .) }} anchore-enterprise-manager db --db-use-ssl --db-connect postgresql://"${ANCHORE_FEEDS_DB_USER}":"${ANCHORE_FEEDS_DB_PASSWORD}"@"${ANCHORE_FEEDS_DB_HOST/:5432}:${ANCHORE_FEEDS_DB_PORT:-5432}"/"${ANCHORE_FEEDS_DB_NAME}"?sslmode={{- .Values.anchoreEnterpriseFeeds.dbConfig.sslMode -}}\&sslrootcert=/home/anchore/certs/{{- .Values.anchoreEnterpriseFeeds.dbConfig.sslRootCertName }} upgrade --dontask; + {{ print (include "doSourceFile" .) }} anchore-enterprise-manager db --db-use-ssl --db-connect postgresql://"${ANCHORE_FEEDS_DB_USER}":"${ANCHORE_FEEDS_DB_PASSWORD}"@"${ANCHORE_FEEDS_DB_HOST/:5432}:${ANCHORE_FEEDS_DB_PORT:-5432}"/"${ANCHORE_FEEDS_DB_NAME}"?sslmode={{- .Values.anchoreEnterpriseFeeds.dbConfig.sslMode -}}\&sslrootcert=/home/anchore/certs/{{- .Values.anchoreEnterpriseFeeds.dbConfig.sslRootCertName }} pre-upgrade-check; + {{- end }} + {{- if .Values.cloudsql.enabled }} + - | + sql_proxy_pid=$(pgrep cloud_sql_proxy) && kill -INT $sql_proxy_pid; + {{- end }} +{{- end }} diff --git a/stable/anchore-engine/templates/hooks/pre-upgrade/anchore_upgrade_role.yaml b/stable/anchore-engine/templates/hooks/pre-upgrade/anchore_upgrade_role.yaml new file mode 100644 index 00000000..2277dea0 --- /dev/null +++ b/stable/anchore-engine/templates/hooks/pre-upgrade/anchore_upgrade_role.yaml @@ -0,0 +1,69 @@ +{{- if .Values.anchoreGlobal.usePreupgradeHook -}} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "anchore-engine.fullname" . }}-upgrade-role + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "anchore-engine.fullname" . 
}} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- with .Values.anchoreGlobal.labels }} + {{ toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.anchoreEngineUpgradeJob.labels }} + {{ toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.anchoreEnterpriseEngineUpgradeJob.labels }} + {{ toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.anchoreEnterpriseFeedsUpgradeJob.labels }} + {{ toYaml . | nindent 4 }} + {{- end }} + + annotations: + "helm.sh/hook": pre-upgrade + "helm.sh/hook-weight": "1" + {{- with .Values.anchoreGlobal.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.anchoreEngineUpgradeJob.annotations }} + {{ toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.anchoreEnterpriseEngineUpgradeJob.annotations }} + {{ toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.anchoreEnterpriseFeedsUpgradeJob.annotations }} + {{ toYaml . | nindent 4 }} + {{- end }} + +rules: + - apiGroups: + - extensions + - apps + resources: + - deployments + verbs: + - get + - list + - watch + - update + - patch + - apiGroups: + - apps + resources: + - deployments/scale + verbs: + - patch + - apiGroups: + - "" + resources: + - pods + verbs: + - watch + - list + - get + +{{- end }} diff --git a/stable/anchore-engine/templates/hooks/pre-upgrade/anchore_upgrade_rolebinding.yaml b/stable/anchore-engine/templates/hooks/pre-upgrade/anchore_upgrade_rolebinding.yaml new file mode 100644 index 00000000..b75898b0 --- /dev/null +++ b/stable/anchore-engine/templates/hooks/pre-upgrade/anchore_upgrade_rolebinding.yaml @@ -0,0 +1,48 @@ +{{- if .Values.anchoreGlobal.usePreupgradeHook -}} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "anchore-engine.fullname" . }}-upgrade-role-binding + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "anchore-engine.fullname" . 
}} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- with .Values.anchoreGlobal.labels }} + {{ toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.anchoreEngineUpgradeJob.labels }} + {{ toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.anchoreEnterpriseEngineUpgradeJob.labels }} + {{ toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.anchoreEnterpriseFeedsUpgradeJob.labels }} + {{ toYaml . | nindent 4 }} + {{- end }} + annotations: + "helm.sh/hook": pre-upgrade + "helm.sh/hook-weight": "1" + {{- with .Values.anchoreGlobal.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.anchoreEngineUpgradeJob.annotations }} + {{ toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.anchoreEnterpriseEngineUpgradeJob.annotations }} + {{ toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.anchoreEnterpriseFeedsUpgradeJob.annotations }} + {{ toYaml . | nindent 4 }} + {{- end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "anchore-engine.fullname" . }}-upgrade-role +subjects: + - kind: ServiceAccount + name: {{ template "anchore-engine.fullname" . }}-upgrade-sa + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/stable/anchore-engine/templates/hooks/pre-upgrade/anchore_upgrade_service_account.yaml b/stable/anchore-engine/templates/hooks/pre-upgrade/anchore_upgrade_service_account.yaml new file mode 100644 index 00000000..07716066 --- /dev/null +++ b/stable/anchore-engine/templates/hooks/pre-upgrade/anchore_upgrade_service_account.yaml @@ -0,0 +1,40 @@ +{{- if .Values.anchoreGlobal.usePreupgradeHook -}} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "anchore-engine.fullname" . }}-upgrade-sa + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "anchore-engine.fullname" . 
}} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- with .Values.anchoreGlobal.labels }} + {{ toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.anchoreEngineUpgradeJob.labels }} + {{ toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.anchoreEnterpriseEngineUpgradeJob.labels }} + {{ toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.anchoreEnterpriseFeedsUpgradeJob.labels }} + {{ toYaml . | nindent 4 }} + {{- end }} + annotations: + "helm.sh/hook": pre-upgrade + "helm.sh/hook-weight": "1" + {{- with .Values.anchoreGlobal.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.anchoreEngineUpgradeJob.annotations }} + {{ toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.anchoreEnterpriseEngineUpgradeJob.annotations }} + {{ toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.anchoreEnterpriseFeedsUpgradeJob.annotations }} + {{ toYaml . | nindent 4 }} + {{- end }} +{{- end -}} diff --git a/stable/anchore-engine/templates/ingress.yaml b/stable/anchore-engine/templates/ingress.yaml index fff89203..d4d7a3b0 100644 --- a/stable/anchore-engine/templates/ingress.yaml +++ b/stable/anchore-engine/templates/ingress.yaml @@ -1,4 +1,7 @@ {{- if .Values.ingress.enabled -}} + +{{- template "checkIngressValues" . -}} + {{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" }} apiVersion: networking.k8s.io/v1 {{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1/Ingress" }} @@ -40,26 +43,28 @@ spec: {{- end }} rules: {{- if or .Values.ingress.apiHosts .Values.ingress.uiHosts .Values.ingress.feedsHosts .Values.ingress.reportsHosts }} - {{- range .Values.ingress.apiHosts }} - - host: {{ . 
| quote }} + {{- range $apiHostIndex, $apiHostName := .Values.ingress.apiHosts }} + - host: {{ $apiHostName | quote }} http: paths: - - path: {{ $.Values.ingress.apiPath }} + {{- range $apiPathIndex, $apiPath := $.Values.ingress.apiPaths }} + - path: {{ $apiPath | quote }} {{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" }} pathType: Prefix backend: service: - name: {{ template "anchore-engine.api.fullname" $ }} + name: {{ template "anchore-engine.api.serviceName" $ }} port: number: {{ $.Values.anchoreApi.service.port }} - {{else}} + {{- else }} backend: - serviceName: {{ template "anchore-engine.api.fullname" $ }} + serviceName: {{ template "anchore-engine.api.serviceName" $ }} servicePort: {{ $.Values.anchoreApi.service.port }} {{- end }} + {{- end }} {{- end }} - {{- range .Values.ingress.uiHosts }} - - host: {{ . | quote }} + {{- range $uiHostIndex, $uiHostName := .Values.ingress.uiHosts }} + - host: {{ $uiHostName | quote }} http: paths: - path: {{ $.Values.ingress.uiPath }} @@ -67,66 +72,70 @@ spec: pathType: Prefix backend: service: - name: {{ template "anchore-engine.enterprise-ui.fullname" $ }} + name: {{ template "anchore-engine.enterprise-ui.serviceName" $ }} port: number: {{ $.Values.anchoreEnterpriseUi.service.port }} {{- else}} backend: - serviceName: {{ template "anchore-engine.enterprise-ui.fullname" $ }} + serviceName: {{ template "anchore-engine.enterprise-ui.serviceName" $ }} servicePort: {{ $.Values.anchoreEnterpriseUi.service.port }} {{- end }} {{- end }} - {{- range .Values.ingress.feedsHosts }} - - host: {{ . 
| quote }} + {{- range $feedsHostIndex, $feedsHostName := .Values.ingress.feedsHosts }} + - host: {{ $feedsHostName | quote }} http: paths: - - path: {{ $.Values.ingress.feedsPath }} + {{ range $feedsPathIndex, $feedsPath := $.Values.ingress.feedsPaths }} + - path: {{ $feedsPath }} {{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" }} pathType: Prefix backend: service: - name: {{ template "anchore-engine.enterprise-feeds.fullname" $ }} + name: {{ template "anchore-engine.enterprise-feeds.serviceName" $ }} port: number: {{ $.Values.anchoreEnterpriseFeeds.service.port }} {{- else}} backend: - serviceName: {{ template "anchore-engine.enterprise-feeds.fullname" $ }} + serviceName: {{ template "anchore-engine.enterprise-feeds.serviceName" $ }} servicePort: {{ $.Values.anchoreEnterpriseFeeds.service.port }} {{- end }} + {{- end }} {{- end }} - {{- range .Values.ingress.reportsHosts }} - - host: {{ . | quote }} + {{- range $reportsHostIndex, $reportsHostName := .Values.ingress.reportsHosts }} + - host: {{ $reportsHostName | quote }} http: paths: - - path: {{ $.Values.ingress.reportsPath }} + {{- range $reportsPathIndex, $reportsPath := $.Values.ingress.reportsPaths }} + - path: {{ $reportsPath }} {{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" }} pathType: Prefix backend: service: - name: {{ template "anchore-engine.api.fullname" $ }} + name: {{ template "anchore-engine.api.serviceName" $ }} port: number: {{ $.Values.anchoreEnterpriseReports.service.apiPort }} {{- else}} backend: - serviceName: {{ template "anchore-engine.api.fullname" $ }} + serviceName: {{ template "anchore-engine.api.serviceName" $ }} servicePort: {{ $.Values.anchoreEnterpriseReports.service.apiPort }} {{- end }} + {{- end }} {{- end }} {{- else }} - http: paths: - {{- with .Values.ingress.apiPath }} + {{- range .Values.ingress.apiPaths }} - path: {{ . 
}} {{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" }} pathType: Prefix backend: service: - name: {{ template "anchore-engine.api.fullname" $ }} + name: {{ template "anchore-engine.api.serviceName" $ }} port: number: {{ $.Values.anchoreApi.service.port }} {{else}} backend: - serviceName: {{ template "anchore-engine.api.fullname" $ }} + serviceName: {{ template "anchore-engine.api.serviceName" $ }} servicePort: {{ $.Values.anchoreApi.service.port }} {{- end }} {{- end }} @@ -136,42 +145,42 @@ spec: pathType: Prefix backend: service: - name: {{ template "anchore-engine.enterprise-ui.fullname" $ }} + name: {{ template "anchore-engine.enterprise-ui.serviceName" $ }} port: number: {{ $.Values.anchoreEnterpriseUi.service.port }} {{- else}} backend: - serviceName: {{ template "anchore-engine.enterprise-ui.fullname" $ }} + serviceName: {{ template "anchore-engine.enterprise-ui.serviceName" $ }} servicePort: {{ $.Values.anchoreEnterpriseUi.service.port }} {{- end }} {{- end }} - {{- with .Values.ingress.feedsPath }} + {{- range .Values.ingress.feedsPaths }} - path: {{ . }} {{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" }} pathType: Prefix backend: service: - name: {{ template "anchore-engine.enterprise-feeds.fullname" $ }} + name: {{ template "anchore-engine.enterprise-feeds.serviceName" $ }} port: number: {{ $.Values.anchoreEnterpriseFeeds.service.port }} {{- else}} backend: - serviceName: {{ template "anchore-engine.enterprise-feeds.fullname" $ }} + serviceName: {{ template "anchore-engine.enterprise-feeds.serviceName" $ }} servicePort: {{ $.Values.anchoreEnterpriseFeeds.service.port }} {{- end }} {{- end }} - {{- with .Values.ingress.reportsPath }} + {{- range .Values.ingress.reportsPaths }} - path: {{ . 
}} {{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" }} pathType: Prefix backend: service: - name: {{ template "anchore-engine.api.fullname" $ }} + name: {{ template "anchore-engine.api.serviceName" $ }} port: number: {{ $.Values.anchoreEnterpriseReports.service.apiPort }} {{- else}} backend: - serviceName: {{ template "anchore-engine.api.fullname" $ }} + serviceName: {{ template "anchore-engine.api.serviceName" $ }} servicePort: {{ $.Values.anchoreEnterpriseReports.service.apiPort }} {{- end }} {{- end }} diff --git a/stable/anchore-engine/templates/policy_bundle_configmap.yaml b/stable/anchore-engine/templates/policy_bundle_configmap.yaml index 10b06c68..a05a0cb0 100644 --- a/stable/anchore-engine/templates/policy_bundle_configmap.yaml +++ b/stable/anchore-engine/templates/policy_bundle_configmap.yaml @@ -15,4 +15,4 @@ data: {{- with .Values.anchoreGlobal.policyBundles }} {{- toYaml . | nindent 2 }} {{- end }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/stable/anchore-engine/templates/policy_engine_deployment.yaml b/stable/anchore-engine/templates/policy_engine_deployment.yaml index cf591822..2d4204ad 100644 --- a/stable/anchore-engine/templates/policy_engine_deployment.yaml +++ b/stable/anchore-engine/templates/policy_engine_deployment.yaml @@ -54,7 +54,6 @@ spec: {{- if not .Values.inject_secrets_via_env }} checksum/secrets: {{ include (print $.Template.BasePath "/engine_secret.yaml") . | sha256sum }} {{- end }} - checksum/env: {{ include (print $.Template.BasePath "/engine_configmap_env.yaml") . | sha256sum }} checksum/engine-config: {{ include (print $.Template.BasePath "/engine_configmap.yaml") . 
| sha256sum }} spec: {{- with .Values.anchoreGlobal.securityContext }} @@ -73,7 +72,7 @@ spec: {{- if and .Values.anchoreGlobal.scratchVolume.fixGroupPermissions .Values.anchoreGlobal.securityContext.fsGroup }} initContainers: - name: mode-fixer - image: alpine + image: {{ .Values.anchoreGlobal.scratchVolume.initContainerImage }} securityContext: runAsUser: 0 volumeMounts: @@ -89,8 +88,17 @@ spec: - name: cloudsql-proxy image: {{ .Values.cloudsql.image.repository }}:{{ .Values.cloudsql.image.tag }} imagePullPolicy: {{ .Values.cloudsql.image.pullPolicy }} + {{- with .Values.anchoreGlobal.containerSecurityContext }} + securityContext: + {{ toYaml . | nindent 10 }} + {{- end }} command: ["/cloud_sql_proxy"] args: + {{- if .Values.cloudsql.extraArgs }} + {{- range $arg := .Values.cloudsql.extraArgs }} + - {{ quote $arg }} + {{- end }} + {{- end }} - "-instances={{ .Values.cloudsql.instance }}=tcp:5432" {{- if .Values.cloudsql.useExistingServiceAcc }} - "-credential_file=/var/{{ .Values.cloudsql.serviceAccSecretName }}/{{ .Values.cloudsql.serviceAccJsonName }}" @@ -108,16 +116,23 @@ spec: image: {{ .Values.anchoreGlobal.image }} imagePullPolicy: {{ .Values.anchoreGlobal.imagePullPolicy }} {{- end }} + {{- with .Values.anchoreGlobal.containerSecurityContext }} + securityContext: + {{ toYaml . | nindent 10 }} + {{- end }} + command: ["/bin/sh", "-c"] {{- if .Values.anchoreEnterpriseGlobal.enabled }} - args: ["anchore-enterprise-manager", "service", "start", "--no-auto-upgrade", "policy_engine"] + args: + - {{ print (include "doSourceFile" .) }} /docker-entrypoint.sh anchore-enterprise-manager service start --no-auto-upgrade policy_engine {{- else }} - args: ["anchore-manager", "service", "start", "--no-auto-upgrade", "policy_engine"] + args: + - {{ print (include "doSourceFile" .) 
}} /docker-entrypoint.sh anchore-manager service start --no-auto-upgrade policy_engine {{- end }} envFrom: {{- if not .Values.inject_secrets_via_env }} - {{- if .Values.anchoreGlobal.existingSecret }} + {{- if .Values.anchoreGlobal.useExistingSecrets }} - secretRef: - name: {{ .Values.anchoreGlobal.existingSecret }} + name: {{ .Values.anchoreGlobal.existingSecretName }} {{- else }} - secretRef: name: {{ include "anchore-engine.fullname" . }} @@ -125,8 +140,6 @@ spec: name: {{ print (include "anchore-engine.fullname" .) "-admin-pass" }} {{- end }} {{- end }} - - configMapRef: - name: {{ template "anchore-engine.fullname" . }}-env env: {{- with .Values.anchoreGlobal.extraEnv }} {{- toYaml . | nindent 8 }} @@ -165,6 +178,9 @@ spec: mountPath: /home/anchore/certs/ readOnly: true {{- end }} + {{- with .Values.anchoreGlobal.extraVolumeMounts }} + {{- toYaml . | nindent 8 }} + {{- end }} livenessProbe: httpGet: path: /health @@ -223,6 +239,9 @@ spec: secret: secretName: {{ .Values.cloudsql.serviceAccSecretName }} {{- end }} + {{- with .Values.anchoreGlobal.extraVolumes }} + {{- toYaml . | nindent 8 }} + {{- end }} {{- with .Values.anchorePolicyEngine.nodeSelector }} nodeSelector: {{ toYaml . | nindent 8 }} @@ -235,7 +254,7 @@ spec: tolerations: {{ toYaml . | nindent 8 }} {{- end }} - {{- with .Values.anchoreGlobal.serviceAccountName }} + {{- with include "anchore-engine.policy-engine.serviceAccountName" . }} serviceAccountName: {{ . }} {{- end }} @@ -243,7 +262,7 @@ spec: apiVersion: v1 kind: Service metadata: - name: {{ template "anchore-engine.policy-engine.fullname" . }} + name: {{ template "anchore-engine.policy-engine.serviceName" . }} labels: app: {{ template "anchore-engine.fullname" . }} chart: {{ .Chart.Name }}-{{ .Chart.Version }} @@ -267,6 +286,9 @@ spec: port: {{ .Values.anchorePolicyEngine.service.port }} targetPort: {{ .Values.anchorePolicyEngine.service.port }} protocol: TCP + {{- with .Values.anchorePolicyEngine.service.nodePort }} + nodePort: {{ . 
}} + {{- end }} selector: app: {{ template "anchore-engine.fullname" . }} component: {{ $component }} diff --git a/stable/anchore-engine/templates/simplequeue_deployment.yaml b/stable/anchore-engine/templates/simplequeue_deployment.yaml index f0d36f58..719c614e 100644 --- a/stable/anchore-engine/templates/simplequeue_deployment.yaml +++ b/stable/anchore-engine/templates/simplequeue_deployment.yaml @@ -54,7 +54,6 @@ spec: {{- if not .Values.inject_secrets_via_env }} checksum/secrets: {{ include (print $.Template.BasePath "/engine_secret.yaml") . | sha256sum }} {{- end }} - checksum/env: {{ include (print $.Template.BasePath "/engine_configmap_env.yaml") . | sha256sum }} checksum/engine-config: {{ include (print $.Template.BasePath "/engine_configmap.yaml") . | sha256sum }} spec: {{- with .Values.anchoreGlobal.securityContext }} @@ -75,8 +74,17 @@ spec: - name: cloudsql-proxy image: {{ .Values.cloudsql.image.repository }}:{{ .Values.cloudsql.image.tag }} imagePullPolicy: {{ .Values.cloudsql.image.pullPolicy }} + {{- with .Values.anchoreGlobal.containerSecurityContext }} + securityContext: + {{ toYaml . | nindent 10 }} + {{- end }} command: ["/cloud_sql_proxy"] args: + {{- if .Values.cloudsql.extraArgs }} + {{- range $arg := .Values.cloudsql.extraArgs }} + - {{ quote $arg }} + {{- end }} + {{- end }} - "-instances={{ .Values.cloudsql.instance }}=tcp:5432" {{- if .Values.cloudsql.useExistingServiceAcc }} - "-credential_file=/var/{{ .Values.cloudsql.serviceAccSecretName }}/{{ .Values.cloudsql.serviceAccJsonName }}" @@ -94,16 +102,23 @@ spec: image: {{ .Values.anchoreGlobal.image }} imagePullPolicy: {{ .Values.anchoreGlobal.imagePullPolicy }} {{- end }} + {{- with .Values.anchoreGlobal.containerSecurityContext }} + securityContext: + {{ toYaml . 
| nindent 10 }} + {{- end }} + command: ["/bin/sh", "-c"] {{- if .Values.anchoreEnterpriseGlobal.enabled }} - args: ["anchore-enterprise-manager", "service", "start", "--no-auto-upgrade", "simplequeue"] + args: + - {{ print (include "doSourceFile" .) }} /docker-entrypoint.sh anchore-enterprise-manager service start --no-auto-upgrade simplequeue {{- else }} - args: ["anchore-manager", "service", "start", "--no-auto-upgrade", "simplequeue"] + args: + - {{ print (include "doSourceFile" .) }} /docker-entrypoint.sh anchore-manager service start --no-auto-upgrade simplequeue {{- end }} envFrom: {{- if not .Values.inject_secrets_via_env }} - {{- if .Values.anchoreGlobal.existingSecret }} + {{- if .Values.anchoreGlobal.useExistingSecrets }} - secretRef: - name: {{ .Values.anchoreGlobal.existingSecret }} + name: {{ .Values.anchoreGlobal.existingSecretName }} {{- else }} - secretRef: name: {{ include "anchore-engine.fullname" . }} @@ -111,8 +126,6 @@ spec: name: {{ print (include "anchore-engine.fullname" .) "-admin-pass" }} {{- end }} {{- end }} - - configMapRef: - name: {{ template "anchore-engine.fullname" . }}-env env: {{- with .Values.anchoreGlobal.extraEnv }} {{- toYaml . | nindent 8 }} @@ -149,6 +162,9 @@ spec: mountPath: /home/anchore/certs/ readOnly: true {{- end }} + {{- with .Values.anchoreGlobal.extraVolumeMounts }} + {{- toYaml . | nindent 8 }} + {{- end }} livenessProbe: httpGet: path: /health @@ -201,6 +217,9 @@ spec: secret: secretName: {{ .Values.cloudsql.serviceAccSecretName }} {{- end }} + {{- with .Values.anchoreGlobal.extraVolumes }} + {{- toYaml . | nindent 8 }} + {{- end }} {{- with .Values.anchoreSimpleQueue.nodeSelector }} nodeSelector: {{ toYaml . | nindent 8 }} @@ -213,7 +232,7 @@ spec: tolerations: {{ toYaml . | nindent 8 }} {{- end }} - {{- with .Values.anchoreGlobal.serviceAccountName }} + {{- with include "anchore-engine.simplequeue.serviceAccountName" . }} serviceAccountName: {{ . 
}} {{- end }} @@ -221,7 +240,7 @@ spec: apiVersion: v1 kind: Service metadata: - name: {{ template "anchore-engine.simplequeue.fullname" . }} + name: {{ template "anchore-engine.simplequeue.serviceName" . }} labels: app: {{ template "anchore-engine.fullname" . }} chart: {{ .Chart.Name }}-{{ .Chart.Version }} @@ -245,6 +264,9 @@ spec: port: {{ .Values.anchoreSimpleQueue.service.port }} targetPort: {{ .Values.anchoreSimpleQueue.service.port }} protocol: TCP + {{- with .Values.anchoreSimpleQueue.service.nodePort }} + nodePort: {{ . }} + {{- end }} selector: app: {{ template "anchore-engine.fullname" . }} component: {{ $component }} diff --git a/stable/anchore-engine/values.yaml b/stable/anchore-engine/values.yaml index 88d66972..7eb345fe 100644 --- a/stable/anchore-engine/values.yaml +++ b/stable/anchore-engine/values.yaml @@ -1,25 +1,33 @@ # Default values for anchore_engine chart. -# Anchore engine has a dependency on Postgresql, configure here +fullnameOverride: Null +# Anchore has a dependency on Postgresql, configure here postgresql: # To use an external DB or Google CloudSQL in GKE, uncomment & set 'enabled: false' - # externalEndpoint, postgresUser, postgresPassword & postgresDatabase are required values for external postgres + # externalEndpoint, postgresUser, postgresPassword, postgresDatabase, & postgresPort are required values for external postgres # enabled: false # If enabled=false specify an external (already existing) postgres deployment for use. - # Set to the host and port. eg. mypostgres.myserver.io:5432 + # Set to the hostname eg. mypostgres.myserver.io externalEndpoint: Null postgresUser: anchoreengine postgresPassword: anchore-postgres,123 postgresDatabase: anchore + postgresPort: 5432 # Configure size of the persistent volume used with helm managed chart. # This should be commented out if using an external endpoint. 
persistence: resourcePolicy: keep size: 20Gi + # If running on OpenShift using the RedHat images for PostgreSQL, uncomment this line to ensure the PVC is mounted properly + # mountPath: /var/lib/pgsql/data # If running on OpenShift - uncomment the image, imageTag & extraEnv values below. + # For upgrades from previous deployments on PG9.6, use this # image: registry.access.redhat.com/rhscl/postgresql-96-rhel7 + + # For new installs, please use PG v13 instead of 9.6 + # image: registry.redhat.io/rhel9/postgresql-13 # imageTag: latest # extraEnv: # - name: POSTGRESQL_USER @@ -39,6 +47,11 @@ postgresql: cloudsql: # To use CloudSQL in GKE set 'enable: true' enabled: false + # Inject extra arguments into the cloudsql container command, eg: + # extraArgs: + # - "-ip_address_types=PRIVATE" + # - "-enable_iam_login" + extraArgs: [] # set CloudSQL instance: 'project:zone:instancname' instance: "" # Optional existing service account secret to use. @@ -51,39 +64,40 @@ cloudsql: tag: 1.22.0 pullPolicy: IfNotPresent -# Create an ingress resource for all external anchore engine services (API & Enterprise UI). +# Create an ingress resource for all external Anchore services (API & Enterprise UI). # By default this chart is setup to use the NGINX ingress controller which needs to be installed & configured on your cluster. # To utilize a GCE/ALB ingress controller comment out the nginx annotations below, change ingress.class, edit path configurations as per the comments, & set API/UI services to use NodePort. 
ingress: enabled: false labels: {} - # Use the following paths for GCE/ALB ingress controller - # feedsPath: /v1/feeds/* - # apiPath: /v1/* - # uiPath: /* # Exposing the feeds API w/ ingress is for special cases only, uncomment feedsPath if external access to the feeds API is needed - # feedsPath: /v1/feeds/ + feedsPaths: + - /v1/feeds/ + - /v2/feeds/ # Exposing the report API w/ ingress enables the GraphQL interface at /v1/reports/graphql - # reportsPath: /v1/reports/ - apiPath: /v1/ + reportsPaths: + - /v1/reports/ + - /v2/reports/ + apiPaths: + - /v1/ + - /v2/ + - /version/ uiPath: / - # uncomment `feedsPath` to add an ingress endpoint for the feeds api - # uncomment 'reportsPath' to add an ingress endpoint for the reports api # Set ingressClassName if kubernetes version is >= 1.18 # Reference: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/ - # ingressClassName: nginx + # ingressClassName: alb # Uncomment the following lines to bind on specific hostnames - # apiHosts: + apiHosts: [] # - anchore-api.example.com - # uiHosts: + uiHosts: [] # - anchore-ui.example.com - # feedsHosts: + feedsHosts: [] # - anchore-feeds.example.com - # reportsHosts: + reportsHosts: [] # - anchore-api.example.com - annotations: + annotations: {} # kubernetes.io/ingress.class: gce # kubernetes.io/ingress.class: nginx # nginx.ingress.kubernetes.io/ssl-redirect: "false" @@ -95,17 +109,17 @@ ingress: # hosts: # - chart-example.local -# Global configuration shared by all anchore-engine services. +# Global configuration shared by all Anchore services. anchoreGlobal: - # Image used for all anchore engine deployments (excluding enterprise components). + # Image used for all Anchore deployments (excluding enterprise components). 
image: docker.io/anchore/anchore-engine:v1.1.0 imagePullPolicy: IfNotPresent - # Set image pull secret name if using an anchore-engine image from a private registry + # Set image pull secret name if using an Anchore image from a private registry imagePullSecretName: # Specify a service account name utilized to run all Anchore pods - serviceAccountName: Null + # serviceAccountName: Null # Set this value to true to setup the chart for OpenShift deployment compatibility. openShiftDeployment: false @@ -115,7 +129,7 @@ anchoreGlobal: # app.kubernetes.io/managed-by: Helm # foo: bar - # Add common annotations to set on all pods. Useful expecially when inject secrets directly into pods as ENV from vault via mutation-webhook-injection method. + # Add common annotations to set on all pods. Useful especially when inject secrets directly into pods as ENV from vault via mutation-webhook-injection method. # Ref: https://banzaicloud.com/docs/bank-vaults/mutating-webhook/ annotations: {} # vault.security.banzaicloud.io/vault-addr: "https://vault:8200" @@ -129,12 +143,29 @@ anchoreGlobal: # - name: foo # value: bar - # Specifies an existing secret to be used for admin and db passwords - # The secret should define the following environment vars: - # ANCHORE_ADMIN_PASSWORD - # ANCHORE_DB_PASSWORD - # ANCHORE_SAML_SECRET (if applicable) - existingSecret: Null + # When useExistingSecrets is set to `true` the chart will not create secrets specifying the environment variables used in deployments. + # Instead, the chart will use secrets that have already been applied to the namespace that this chart is being deployed to. 
+ useExistingSecrets: false + + # Set the name of your existing secret for all Anchore components + existingSecretName: anchore-engine-env + + # does `source {{ filePath }}` before starting services + doSourceAtEntry: + enabled: false + filePaths: + - "/vault/secrets/config" + + extraVolumes: [] + # - name: config + # secret: + # secretName: config + + extraVolumeMounts: [] + # - name: config + # mountPath: "/vault/secrets/config" + # subPath: config + # readOnly: true # The scratchVolume controls the mounting of an external volume for scratch space for image analysis. Generally speaking # you need to provision 3x the size of the largest image (uncompressed) that you want to analyze for this space. @@ -142,12 +173,13 @@ anchoreGlobal: # Some k8s Volumes do not properly respect the fsGroup permissions. These volumes will get mounted as root:root # regardless of the security permissions requested. The fixGroupPermissions will create an initContainer that will # fixup the permissions. + initContainerImage: alpine fixGroupPermissions: false mountPath: /analysis_scratch details: {} # Specify volume configuration here - # A secret must be created in the same namespace as anchore-engine is deployed, containing the certificates & public/private keys used for SSL, SAML & custom CAs. + # A secret must be created in the same namespace as Anchore is deployed, containing the certificates & public/private keys used for SSL, SAML & custom CAs. # Certs and keys should be added using the file name the certificate is stored at. This secret will be mounted to /home/anchore/certs. certStoreSecretName: Null @@ -158,8 +190,11 @@ anchoreGlobal: runAsGroup: 1000 fsGroup: 1000 + # Specify your container securityContext here + containerSecurityContext: {} + ### - # Start of General Anchore Engine Configurations (populates /config/config.yaml) + # Start of General Anchore Configurations (populates /config/config.yaml) ### # Set where default configs are placed at startup. 
This must be a writable location for the pod. serviceDir: /anchore_service @@ -181,7 +216,7 @@ anchoreGlobal: # Disable auth on prometheus metrics metricsAuthDisabled: false - # Sets the password & email address for the default anchore-engine admin user. + # Sets the password & email address for the default Anchore admin user. defaultAdminPassword: defaultAdminEmail: example@email.com @@ -196,13 +231,18 @@ anchoreGlobal: oauthEnabled: false oauthTokenExpirationSeconds: 3600 + oauthRefreshTokenExpirationSeconds: 86400 + + # Set this to true in order to disable the SSO JIT provisioning during authentication. This provides an additional + # layer of security and configuration for SSO users to gain access to Anchore. This is disabled by default. + ssoRequireExistingUsers: false # Set this to True to enable storing user passwords only as secure hashes in the db. This can dramatically increase CPU usage if you # don't also use oauth and tokens for internal communications (which requires keys/secret to be configured as well) # WARNING: you should not change this after a system has been initialized as it may cause a mismatch in existing passwords hashedPasswords: false - # Configure the database connection within anchore-engine & enterprise-ui. This may get split into 2 different configurations based on service utilized. + # Configure the database connection within Anchore & enterprise-ui. This may get split into 2 different configurations based on service utilized. 
dbConfig: timeout: 120 # Use ssl, but the default postgresql config from the dependent chart does not support server side ssl, so this should only be enabled for external dbs @@ -219,7 +259,7 @@ anchoreGlobal: # pool_recycle: 600 internalServicesSsl: - # Enable to force all anchore-engine services to communicate internally using SSL + # Enable to force all Anchore services to communicate internally using SSL enabled: false # specify whether cert is verfied against the local certifacte bundle (allow self-signed certs if set to false) verifyCerts: false @@ -276,6 +316,11 @@ anchoreGlobal: failureThreshold: 3 successThreshold: 1 + # Using the preupgrade hook will use a job annotated with helm's pre-upgrade hook. This job utilizes a service account that will be created to call kubectl to scale down the deployment before running the upgrade job. + # The service account is granted deployment, deployment/scale, and pod permissions. See templates/hooks/pre-upgrade/anchore_upgrade_role.yaml for the full list. + # This can be useful for deployments using helm upgrade --wait or ArgoCD. + usePreupgradeHook: false + # Configuration for the analyzer pods that perform image analysis # There may be many of these analyzers but best practice is to not have more than one per node since analysis # is very IO intensive. Use of affinity/anti-affinity rules for scheduling the analyzers is future work. @@ -288,9 +333,12 @@ anchoreAnalyzer: # - name: foo # value: bar + # Specify the service account name utilized to run the analyzer pods + # serviceAccountName: Null + # The cycle timer is the interval between checks to the work queue for new jobs cycleTimers: - image_analyzer: 5 + image_analyzer: 1 # Controls the concurrency of the analyzer itself. Can be configured to process more than one task at a time, but it IO bound, so may not # necessarily be faster depending on hardware. Should test and balance this value vs. number of analyzers for your deployment cluster performance. 
@@ -373,7 +421,7 @@ anchoreAnalyzer: affinity: {} -# Pod configuration for the anchore engine api service. +# Pod configuration for the Anchore api service. anchoreApi: replicaCount: 1 @@ -384,10 +432,16 @@ anchoreApi: # kubernetes service configuration for anchore external API service: + # Override the service name + # name: Null type: ClusterIP port: 8228 annotations: {} label: {} + nodePort: null + + # Specify the service account name utilized to run the API pods + # serviceAccountName: Null # (Optional) Overrides for constructing API URLs. All values are optional. # external: @@ -419,6 +473,9 @@ anchoreCatalog: # - name: foo # value: bar + # Specify the service account name utilized to run the catalog pods + # serviceAccountName: Null + # Intervals to run specific events on (seconds) cycleTimers: # Interval to check for an update to a tag @@ -439,8 +496,6 @@ anchoreCatalog: repo_watcher: 60 # Interval for when the catalog garbage collects images marked for deletion image_gc: 60 - # Interval for the runtime inventory image execution poll - k8s_watcher: 300 k8s_image_watcher: 150 resource_metrics: 60 events_gc: 43200 # 12 hours @@ -555,13 +610,6 @@ anchoreCatalog: # **NOTE: all runtime inventory configurations only apply to Anchore Enterprise deployments - # If enabled, the Anchore Helm Chart will create a Service Account with read-only permission to the cluster (pods & namespaces) - # This is largely to support the Out-Of-The-Box Runtime Inventory feature currently. See catalog_deployment.yaml for - # detailed information on the Service Account, Cluster Role & Binding. - # If using an existing service account, set this value to `false` & use anchoreGlobal.serviceAccountName to specify - # what service account to use. Existing service account must have adequate permissions to use this feature. 
- createServiceAccount: true - runtimeInventory: # This setting tells Anchore how long an image can be missing from an inventory report before it is removed from # The working set. Note: The image will still have a historical record in the reports service, subject to data history @@ -569,27 +617,19 @@ anchoreCatalog: # Note: if a runtime inventory image's digest is also in anchore for regular image analysis, it won't be removed. imageTTLDays: 1 - # Since Anchore is running in Kubernetes, we can collect runtime inventory data out of the box - reportAnchoreCluster: - # If set to true, Anchore will use its own service account's permissions - # (if anchoreCatalog.createServiceAccount is true, see `catalog_deployment.yaml`) to try and collect runtime - # inventory data for all namespaces. - # - # Note: this feature requires a value for clusterName to populate inventory image context - # Note: If anchoreCatalog.createServiceAccount is set to false, and anchoreGlobal.serviceAccountName is unspecified, - # then the Anchore Catalog service won't have enough permission to be able to read cluster information - # (pods & namespaces) for the embedded runtime inventory. - enabled: true - clusterName: anchore-k8s - namespaces: - - all + # checks image status and detects that an 'analyzing' state image is being processed by an analyzer that is no + # longer in the 'up' state and will revert the state of the image back to 'not_analyzed' to allow fast re-queueing. + downAnalyzerTaskRequeue: true # kubernetes service configuration for anchore catalog api service: + # Override the service name + # name: Null type: ClusterIP port: 8082 annotations: {} labels: {} + nodePort: null # resources: # limits: @@ -607,7 +647,7 @@ anchoreCatalog: tolerations: [] affinity: {} -# Pod configuration for the anchore engine policy service. +# Pod configuration for the Anchore policy service. 
anchorePolicyEngine: replicaCount: 1 @@ -616,6 +656,9 @@ anchorePolicyEngine: # - name: foo # value: bar + # Specify the service account name utilized to run the policy engine pods + # serviceAccountName: Null + # Intervals to run specific events on (seconds) cycleTimers: # Interval to run a feed sync to get latest cve data @@ -625,17 +668,24 @@ anchorePolicyEngine: # 1 minute between checks to verify local grype-db is up to date grypedb_sync: 60 - # After chart v1.18.0 the only valid vulnerabilityProvider setting is `grype`, any other settings will fail - vulnerabilityProvider: grype + # Controls the load of the Image Package DB Entries and disables the packages.verify gate. + # Reduces significant DB load as a result. + enablePackageDbLoad: true + + # Reduces load by not requiring a re-scan of every sbom for vulnerabilities after each feed sync + enableImagesByVulnerabilityAPI: true cacheTTL: 3600 # 1 hour # kubernetes service configuration for anchore policy engine api service: + # Override the service name + # name: Null type: ClusterIP port: 8087 annotations: {} labels: {} + nodePort: null # resources: # limits: @@ -653,7 +703,7 @@ anchorePolicyEngine: tolerations: [] affinity: {} -# Pod configuration for the anchore engine simplequeue service. +# Pod configuration for the Anchore simplequeue service. anchoreSimpleQueue: replicaCount: 1 @@ -662,12 +712,18 @@ anchoreSimpleQueue: # - name: foo # value: bar + # Specify the service account name utilized to run the simple queue pods + # serviceAccountName: Null + # kubernetes service configuration for anchore simplequeue api service: + # Override the service name + # name: Null type: ClusterIP port: 8083 annotations: {} labels: {} + nodePort: null # resources: # limits: @@ -701,6 +757,7 @@ anchoreEngineUpgradeJob: tolerations: [] affinity: {} annotations: {} + labels: {} # This section is used for configuring anchore enterprise. 
anchoreEnterpriseGlobal: @@ -709,7 +766,8 @@ anchoreEnterpriseGlobal: # Create this secret with the following command - kubectl create secret generic anchore-enterprise-license --from-file=license.yaml= licenseSecretName: anchore-enterprise-license - image: docker.io/anchore/enterprise:v4.0.1 + image: docker.io/anchore/enterprise:v4.9.5 + imagePullPolicy: IfNotPresent # Name of the kubernetes secret containing your dockerhub creds with access to the anchore enterprise images. # Create this secret with the following command - kubectl create secret docker-registry anchore-enterprise-pullcreds --docker-server=docker.io --docker-username= --docker-password= --docker-email= @@ -719,23 +777,30 @@ anchoreEnterpriseGlobal: # Only utilized if anchoreEnterpriseGlobal.enabled: true anchore-feeds-db: # To use an external DB or Google CloudSQL, uncomment & set 'enabled: false' - # externalEndpoint, postgresUser, postgresPassword & postgresDatabase are required values for external postgres + # externalEndpoint, postgresUser, postgresPassword, postgresDatabase, & postgresPort are required values for external postgres # enabled: false # if enabled=false specify an external (already existing) postgres deployment for use. - # Set to the host and port. eg. mypostgres.myserver.io:5432 + # Set to the hostname eg. mypostgres.myserver.io externalEndpoint: Null postgresUser: anchoreengine postgresPassword: anchore-postgres,123 postgresDatabase: anchore-feeds + postgresPort: 5432 # Configure size of the persistent volume used with helm managed chart. # This should be commented out if using an external endpoint. persistence: resourcePolicy: keep size: 20Gi + # If running on OpenShift using the RedHat images for PostgreSQL, uncomment this line to ensure the PVC is mounted properly + # mountPath: /var/lib/pgsql/data # If running on OpenShift - uncomment the image, imageTag & extraEnv values below. 
+ # For upgrades from previous deployments on PG9.6, use this # image: registry.access.redhat.com/rhscl/postgresql-96-rhel7 + + # For new installs, please use PG v13 instead of 9.6 + # image: registry.redhat.io/rhel9/postgresql-13 # imageTag: latest # extraEnv: # - name: POSTGRESQL_USER @@ -756,14 +821,15 @@ anchore-feeds-db: # Database is used for temporarily loading the Ruby gem vulnerability data by the Enterprise Feeds service. anchore-feeds-gem-db: # To use an external DB uncomment & set 'enabled: false' - # externalEndpoint, postgresUser, postgresPassword & postgresDatabase are required values for external postgres + # externalEndpoint, postgresUser, postgresPassword, postgresDatabase, & postgresPort are required values for external postgres # enabled: false # If enabled=false specify an external (already existing) postgres deployment for use. - # Set to the host and port. eg. mypostgres.myserver.io:5432 + # Set to the hostname eg. mypostgres.myserver.io externalEndpoint: Null postgresUser: postgres postgresPassword: anchore-postgres,123 postgresDatabase: gems + postgresPort: 5432 persistence: enabled: false @@ -787,34 +853,51 @@ anchoreEnterpriseFeeds: # GitHub advisory feeds require a github developer personal access token with no permission scopes selected. githubDriverToken: null + # The NVD API allows for an API key to reduce rate limiting. 
Request one from https://nvd.nist.gov/developers/request-an-api-key + useNvdDriverApiKey: false + nvdDriverApiKey: null + # Enable microsoft feeds msrcDriverEnabled: false # Uncomment to add MSRC product IDs for generating their feed data, this extends the pre-defined list of product IDs # msrcWhitelist: # - 12345 + # Additional Ubuntu feed groups + ubuntuExtraReleases: {} + # kinetic: '22.10' + + # Additional Debian feeds groups + debianExtraReleases: {} + # trixie: '13' + # The number of concurrent threads used when downloading RHEL feeds rhelDriverConcurrency: 5 + # Git Repository settings for the Ubuntu feed driver + ubuntuDriverGitUrl: "https://git.launchpad.net/ubuntu-cve-tracker" + # Switch to the git protocol endpoint for significantly improved reliability in fetches as this endpoint is not + # throttled heavily like the https one; however, the git protocol is not validated or encrypted and runs on a different + # port from https + # ubuntuDriverGitUrl: "git://git.launchpad.net/ubuntu-cve-tracker" + ubuntuDriverGitBranch: "master" + # Set extra environment variables. These will be set on all feeds containers. extraEnv: [] # - name: foo # value: bar + # Specify the service account name utilized to run the feeds pods + # serviceAccountName: Null + # Time delay in seconds between consecutive driver runs for processing data cycleTimers: driver_sync: 7200 - # Specifies an existing secret to be used for anchore admin and db passwords - # The secret should define the following environment vars: - # ANCHORE_ADMIN_PASSWORD - # ANCHORE_FEEDS_DB_PASSWORD - # ANCHORE_SAML_SECRET (if applicable) - # ANCHORE_GITHUB_TOKEN (if applicable) + # Set the name of your existing secret for Anchore Enterprise Feeds + existingSecretName: anchore-enterprise-feeds-env - existingSecret: Null - - # Configure the database connection within anchore-engine & enterprise-ui. This may get split into 2 different configurations based on service utilized. 
+ # Configure the database connection within Anchore & enterprise-ui. This may get split into 2 different configurations based on service utilized. dbConfig: timeout: 120 # Use ssl, but the default postgresql config from the dependent chart does not support server side ssl, so this should only be enabled for external dbs @@ -833,6 +916,7 @@ anchoreEnterpriseFeeds: # persistence controls the mounting of an external volume for feed driver download workspace. persistence: enabled: true + fixGroupPermissions: false resourcePolicy: keep # set resource-policy Helm annotation on PVC. Can be nil or "keep" ## A manually managed Persistent Volume and Claim @@ -854,10 +938,13 @@ anchoreEnterpriseFeeds: # kubernetes service configuration for anchore feeds service api service: + # Override the service name + # name: Null type: ClusterIP port: 8448 annotations: {} labels: {} + nodePort: null # resources: # limits: @@ -891,9 +978,10 @@ anchoreEnterpriseFeedsUpgradeJob: tolerations: [] affinity: {} annotations: {} + labels: {} # Configure the Anchore Enterprise role based access control component. -# This component consists of 2 containers that run as side-cars in the anchore engine api pod. +# This component consists of 2 containers that run as side-cars in the Anchore api pod. anchoreEnterpriseRbac: enabled: true @@ -905,11 +993,14 @@ anchoreEnterpriseRbac: # Kubernetes service config - annotations & serviceType configs must be set in anchoreApi # Due to RBAC sharing a service with the general API. service: + # Override the service name + # name: Null managerPort: 8229 authPort: 8089 type: ClusterIP annotations: {} labels: {} + nodePort: null # authResources: # limits: @@ -941,11 +1032,14 @@ anchoreEnterpriseReports: # - name: foo # value: bar + # Specify the service account name utilized to run the reports pods + # serviceAccountName: Null + # GraphiQL is a GUI for editing and testing GraphQL queries and mutations. 
# Set enableGraphiql to true and open http://:/v1/reports/graphql in a browser for reports API enableGraphiql: true - # Set enableDataIngress to true for periodically syncing data from anchore engine into the reports service + # Set enableDataIngress to true for periodically syncing data from Anchore into the reports service enableDataIngress: true # Set enableDataEgress to true to periodically remove reporting data that has been removed in other parts of system @@ -971,11 +1065,15 @@ anchoreEnterpriseReports: reports_data_egress: 600 service: + # Override the service name + # name: Null type: ClusterIP apiPort: 8558 workerPort: 8778 annotations: {} labels: {} + nodePort: null + apinodePort: null # resources: # limits: @@ -1002,6 +1100,9 @@ anchoreEnterpriseNotifications: # - name: foo # value: bar + # Specify the service account name utilized to run the notification pods + # serviceAccountName: Null + cycleTimers: notifications: 30 @@ -1009,10 +1110,13 @@ anchoreEnterpriseNotifications: # uiUrl: "http://" service: + # Override the service name + # name: Null type: ClusterIP port: 8668 annotations: {} labels: {} + nodePort: null # resources: # limits: @@ -1032,9 +1136,9 @@ anchoreEnterpriseNotifications: # Configure the Anchore Enterprise UI. anchoreEnterpriseUi: - # If enabled is set to false, set anchore-ui-redis.enabled to false to ensure that helm doesn't stand up a unneccessary redis instance. + # If enabled is set to false, set ui-redis.enabled to false to ensure that helm doesn't stand up a unneccessary redis instance. enabled: true - image: docker.io/anchore/enterprise-ui:v4.0.0 + image: docker.io/anchore/enterprise-ui:v4.9.1 imagePullPolicy: IfNotPresent # Set extra environment variables. These will be set on all UI containers. 
@@ -1042,11 +1146,16 @@ anchoreEnterpriseUi: # - name: foo # value: bar - # Specifies an existing secret to be used for db and redis endpoints - # This secret should define the following ENV vars - # ANCHORE_APPDB_URI - # ANCHORE_REDIS_URI - existingSecret: Null + # Specify the service account name utilized to run the UI pods + # serviceAccountName: Null + + # Set the name of your existing secret for the Anchore Enterprise UI + existingSecretName: anchore-enterprise-ui-env + + # Specify configurations for database connection user + # This should specifically allow overriding and separation of the ui database user + # dbUser: anchoreengineui + # dbPass: anchore-postgres,123ui # The (optional) `appdbConfig` key specifies the connection options # for the application SQL database. @@ -1073,6 +1182,26 @@ anchoreEnterpriseUi: # If using LDAPS with a custom CA certificate, add the certificate to the secret specified at anchoreGlobal.certStoreSecretName and specify the name of the cert here ldapsRootCaCertName: Null + # The (optional) `logLevel` key allows you to set the descriptive detail of the + # application log output. The key value must be a string selected from the + # following priority-ordered list: + # + # - error + # - warn + # - info + # - http + # - debug + # + # Once set, each level will automatically include the output for any levels + # above it—for example, `info` will include the log output for details at the + # `warn` and `error` details, whereas `error` will only show error output. + # + # This value can be overridden by using the `ANCHORE_LOG_LEVEL` environment + # variable. When no level is set, either within this configuration file or by the + # environment variable, a default level of `http` is used. + # + logLevel: http + # Specifies whether to trust a reverse proxy when setting secure cookies (via the `X-Forwarded-Proto` header). 
enableProxy: false @@ -1171,11 +1300,25 @@ anchoreEnterpriseUi: # kubernetes service configuration for anchore UI service: + # Override the service name + # name: Null type: ClusterIP port: 80 annotations: {} labels: {} sessionAffinity: ClientIP + nodePort: null + + # The (optional) `enrich_inventory_view` key allows you to set whether the + # Kubernetes tab should aggregate and include compliance and vulnerability data + # from the reports service. + # + # Setting this key to be `False` can increase performance on high-scale systems. + # + # This value can be overridden by using the `ANCHORE_ENRICH_INVENTORY_VIEW` + # environment variable. When no flag is set, either within this configuration + # file or by the environment variable, a default setting of `True` is used. + enrichInventoryView: true # resources: # limits: @@ -1193,14 +1336,15 @@ anchoreEnterpriseUi: tolerations: [] affinity: {} -# Anchore Engine Enterprise UI is dependent on redis for storing sessions +# Anchore Enterprise UI is dependent on redis for storing sessions # Only utilized if 'anchoreEnterpriseUi.enabled: true' -anchore-ui-redis: - password: anchore-redis,123 - cluster: - enabled: false - persistence: - enabled: false +ui-redis: + auth: + password: anchore-redis,123 + architecture: standalone + master: + persistence: + enabled: false # To use an external redis endpoint, uncomment to set 'enabled: false' # enabled: false @@ -1225,6 +1369,7 @@ anchoreEnterpriseEngineUpgradeJob: tolerations: [] affinity: {} annotations: {} + labels: {} # To inject secrets ( credentails data ) via env, rather k8s secrets please set this flag to true. # This feature will be useful, especially to inject secrets directly into k8s pods from hashicorp vault diff --git a/stable/ecs-inventory/.helmignore b/stable/ecs-inventory/.helmignore new file mode 100644 index 00000000..0e8a0eb3 --- /dev/null +++ b/stable/ecs-inventory/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. 
+# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/stable/ecs-inventory/Chart.yaml b/stable/ecs-inventory/Chart.yaml new file mode 100644 index 00000000..04f0197b --- /dev/null +++ b/stable/ecs-inventory/Chart.yaml @@ -0,0 +1,26 @@ +apiVersion: v2 + +name: ecs-inventory +description: A Helm chart for the Anchore ECS Inventory, which gathers an inventory of images in use by Amazon Elastic Container Service (ECS). + +keywords: + - analysis + - ecs + - docker + - anchore + - image + - inventory + - security + - scanner + +maintainers: + - name: btodhunter + email: bradyt@anchore.com + - name: hn23 + email: hung.nguyen@anchore.com + +type: application +version: 0.0.6 +appVersion: "1.3.0" + +icon: https://anchore.com/wp-content/uploads/2016/08/anchore.png diff --git a/stable/ecs-inventory/README.md b/stable/ecs-inventory/README.md new file mode 100644 index 00000000..9bbe2402 --- /dev/null +++ b/stable/ecs-inventory/README.md @@ -0,0 +1,107 @@ +# Anchore ECS Inventory Helm Chart +## Anchore ECS Inventory: Anchore ECS Inventory +Anchore ECS Inventory is a tool to gather an inventory of images in use by Amazon Elastic Container Service (ECS) and ship them to the Anchore platform. Anchore ECS Inventory must be able to resolve the Anchore URL and requires API credentials. The minimum version of the Anchore Enterprise platform required for K8s Inventory is 4.7. 
+
+## Installation
+Anchore ECS Inventory creates its own secret based on your values.yaml file for the following keys that are required for successfully deploying and connecting the ecs-inventory service to the Anchore Platform and AWS ECS Service:
+- ecsInventory.awsAccessKeyId
+- ecsInventory.awsSecretAccessKey
+
+You can install the chart via:
+ ```
+ helm repo add anchore https://charts.anchore.io
+ helm install <release_name> -f <path_to_values.yaml> anchore/ecs-inventory
+ ```
+
+A basic values file can always be found [here](https://github.com/anchore/anchore-charts/tree/master/stable/ecs-inventory/values.yaml). The key configurations are in the ecsInventory section.
+
+## Using your own secrets
+
+The (ecsInventory.useExistingSecret and ecsInventory.existingSecretName) or ecsInventory.injectSecretsViaEnv keys allow you to create your own secret and provide it in the values file or place the required secret into the pod via different means such as injecting the secrets into the pod using hashicorp vault.
+
+For example:
+
+- Create a secret in kubernetes:
+
+  ```
+  apiVersion: v1
+  kind: Secret
+  metadata:
+    name: ecs-inventory-secrets
+  type: Opaque
+  stringData:
+    ANCHORE_ECS_INVENTORY_ANCHORE_PASSWORD: foobar
+    AWS_ACCESS_KEY_ID: someKeyId
+    AWS_SECRET_ACCESS_KEY: someSecretAccessKey
+  ```
+
+- Provide it to the helm chart via the values file:
+  ```
+  ecsInventory:
+    useExistingSecret: true
+    existingSecretName: "ecs-inventory-secrets"
+  ```
+
+The Anchore API Password and required AWS secret values can also be injected into the environment of the ecs-inventory container.
For injecting the environment variable, set:
+ ```
+ # set
+ ecsInventory:
+   injectSecretsViaEnv=true
+ ```
+
+See the [ecs-inventory repo](https://github.com/anchore/ecs-inventory) for more information about the ECS Inventory specific configuration.
+
+## Parameters
+
+### Common Resource Parameters
+
+| Name                                  | Description                                                          | Value                                    |
+| ------------------------------------- | -------------------------------------------------------------------- | ---------------------------------------- |
+| `replicaCount`                        | Number of replicas for the Ecs Inventory deployment                  | `1`                                      |
+| `image`                               | Image used for all Ecs Inventory deployments                         | `docker.io/anchore/ecs-inventory:v1.1.0` |
+| `imagePullPolicy`                     | Image pull policy used by all deployments                            | `IfNotPresent`                           |
+| `imagePullSecretName`                 | Name of Docker credentials secret for access to private repos        | `""`                                     |
+| `serviceAccountName`                  | Name of a service account used to run all Anchore Ecs Inventory pods | `""`                                     |
+| `useExistingSecret`                   | set to true to use an existing/precreated secret                     | `false`                                  |
+| `existingSecretName`                  | the name of the precreated secret                                    | `""`                                     |
+| `injectSecretsViaEnv`                 | Enable secret injection into pod environment variables               | `false`                                  |
+| `extraEnv`                            | extra environment variables. These will be set on all containers.
| `[]`                                     |
+| `annotations`                         | Common annotations set on all Kubernetes resources                   | `{}`                                     |
+| `deploymentAnnotations`               | annotations to set on the ecs-inventory deployment                   | `{}`                                     |
+| `securityContext.runAsUser`           | The securityContext runAsUser for all Anchore ECS Inventory pods     | `1000`                                   |
+| `securityContext.runAsGroup`          | The securityContext runAsGroup for all Anchore ECS Inventory pods    | `1000`                                   |
+| `securityContext.fsGroup`             | The securityContext fsGroup for all Anchore ECS Inventory pods       | `1000`                                   |
+| `resources`                           | Resource requests and limits for Anchore ECS Inventory pods          | `{}`                                     |
+| `nodeSelector`                        | Node labels for pod assignment                                       | `{}`                                     |
+| `tolerations`                         | Tolerations for pod assignment                                       | `[]`                                     |
+| `affinity`                            | Affinity for pod assignment                                          | `{}`                                     |
+| `labels`                              | Adds additional labels to all kubernetes resources                   | `{}`                                     |
+| `probes.liveness.initialDelaySeconds` | Initial delay seconds for liveness probe                             | `1`                                      |
+| `probes.liveness.timeoutSeconds`      | Timeout seconds for liveness probe                                   | `10`                                     |
+| `probes.liveness.periodSeconds`       | Period seconds for liveness probe                                    | `5`                                      |
+| `probes.liveness.failureThreshold`    | Failure threshold for liveness probe                                 | `6`                                      |
+| `probes.liveness.successThreshold`    | Success threshold for liveness probe                                 | `1`                                      |
+| `probes.readiness.timeoutSeconds`     | Timeout seconds for the readiness probe                              | `10`                                     |
+| `probes.readiness.periodSeconds`      | Period seconds for the readiness probe                               | `15`                                     |
+| `probes.readiness.failureThreshold`   | Failure threshold for the readiness probe                            | `3`                                      |
+| `probes.readiness.successThreshold`   | Success threshold for the readiness probe                            | `1`                                      |
+
+
+### ecsInventory Parameters ##
+
+| Name                                     | Description                                                        | Value                   |
+| ---------------------------------------- | ------------------------------------------------------------------ | ----------------------- |
+| `ecsInventory.quiet`                     | Determine whether or not to log the inventory report to stdout     | `false`                 |
+| `ecsInventory.output`                    | The output format of the report (options:
table, json) | `json` | +| `ecsInventory.logLevel` | the level of verbosity for logs | `info` | +| `ecsInventory.logFile` | location to write the log file (default is not to have a log file) | `""` | +| `ecsInventory.pollingIntervalSeconds` | The polling interval of the ECS API in seconds | `60` | +| `ecsInventory.anchoreUrl` | the url of the anchore platform | `http://localhost:8228` | +| `ecsInventory.anchoreAccount` | the account of the anchore platform | `admin` | +| `ecsInventory.anchoreUser` | the username of the anchore platform | `admin` | +| `ecsInventory.anchorePassword` | the password of the anchore platform | `foobar` | +| `ecsInventory.anchoreHttpInsecure` | whether or not anchore is using ssl/tls | `true` | +| `ecsInventory.anchoreHttpTimeoutSeconds` | the amount of time in seconds before timing out | `10` | +| `ecsInventory.awsAccessKeyId` | the AWS Access Key ID | `foobar` | +| `ecsInventory.awsSecretAccessKey` | the AWS Secret Access Key | `foobar` | +| `ecsInventory.awsRegion` | the AWS Region | `us-west-2` | diff --git a/stable/ecs-inventory/templates/NOTES.txt b/stable/ecs-inventory/templates/NOTES.txt new file mode 100644 index 00000000..5ef3999d --- /dev/null +++ b/stable/ecs-inventory/templates/NOTES.txt @@ -0,0 +1,6 @@ +Anchore ECS Inventory is a tool to gather an inventory of images in use by Amazon Elastic Container Service (ECS) and ship them to the Anchore platform. +Anchore ECS Inventory must be able to resolve the Anchore URL and requires API credentials. + +For more info see: https://github.com/anchore/ecs-inventory + +ECS Inventory is now installed. diff --git a/stable/ecs-inventory/templates/_helpers.tpl b/stable/ecs-inventory/templates/_helpers.tpl new file mode 100644 index 00000000..0313182b --- /dev/null +++ b/stable/ecs-inventory/templates/_helpers.tpl @@ -0,0 +1,26 @@ +{{/* +Selector labels +*/}} +{{- define "ecsInventory.selectorLabels" -}} +app.kubernetes.io/name: {{ include "ecsInventory.name" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "ecsInventory.labels" -}} +helm.sh/chart: {{ include "ecsInventory.chart" . }} +{{ include "ecsInventory.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +chart: {{ .Chart.Name }}-{{ .Chart.Version }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- with .Values.labels }} +{{ toYaml . }} +{{- end }} +app: {{ include "ecsInventory.fullname" . }} +release: {{ .Release.Name }} +heritage: {{ .Release.Service }} +{{- end }} diff --git a/stable/ecs-inventory/templates/_names.tpl b/stable/ecs-inventory/templates/_names.tpl new file mode 100644 index 00000000..f975897e --- /dev/null +++ b/stable/ecs-inventory/templates/_names.tpl @@ -0,0 +1,38 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "ecsInventory.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "ecsInventory.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "ecsInventory.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "ecsInventory.serviceAccountName" -}} +{{- default "default" .Values.serviceAccountName }} +{{- end }} diff --git a/stable/ecs-inventory/templates/configmap.yaml b/stable/ecs-inventory/templates/configmap.yaml new file mode 100644 index 00000000..1921be3f --- /dev/null +++ b/stable/ecs-inventory/templates/configmap.yaml @@ -0,0 +1,27 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: {{ include "ecsInventory.fullname" . }} + annotations: + {{- with .Values.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} + labels: + {{- include "ecsInventory.labels" . | nindent 4 }} +data: + config.yaml: | + output: {{ .Values.ecsInventory.output | quote }} + log: + level: {{ .Values.ecsInventory.logLevel | quote }} + file: {{ .Values.ecsInventory.logFile | quote }} + anchore: + url: {{ .Values.ecsInventory.anchoreUrl | quote }} + user: {{ .Values.ecsInventory.anchoreUser | quote }} + password: $ANCHORE_ECS_INVENTORY_ANCHORE_PASSWORD + account: {{ .Values.ecsInventory.anchoreAccount | quote }} + http: + insecure: {{ .Values.ecsInventory.anchoreHttpInsecure }} + timeout-seconds: {{ .Values.ecsInventory.anchoreHttpTimeoutSeconds }} + region: {{ .Values.ecsInventory.awsRegion | quote }} + polling-interval-seconds: {{ .Values.ecsInventory.pollingIntervalSeconds }} + quiet: {{ .Values.ecsInventory.quiet }} diff --git a/stable/ecs-inventory/templates/deployment.yaml b/stable/ecs-inventory/templates/deployment.yaml new file mode 100644 index 00000000..9365255c --- /dev/null +++ b/stable/ecs-inventory/templates/deployment.yaml @@ -0,0 +1,100 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "ecsInventory.fullname" . }} + labels: + {{- include "ecsInventory.labels" . 
| nindent 4 }} + annotations: + {{- with .Values.deploymentAnnotations }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + {{- include "ecsInventory.selectorLabels" . | nindent 6 }} + template: + metadata: + annotations: + {{- with .Values.annotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if not .Values.useExistingSecret }} + checksum/secrets: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }} + {{- end }} + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + labels: + {{- include "ecsInventory.labels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecretName }} + imagePullSecrets: + - name: {{ . }} + {{- end }} + serviceAccountName: {{ include "ecsInventory.serviceAccountName" . }} + {{- with .Values.securityContext }} + securityContext: {{- toYaml . | nindent 8 }} + {{- end }} + containers: + - name: {{ .Chart.Name }} + image: {{ .Values.image }} + imagePullPolicy: {{ .Values.imagePullPolicy }} + livenessProbe: + exec: + command: + - /anchore-ecs-inventory + - version + initialDelaySeconds: {{ .Values.probes.liveness.initialDelaySeconds }} + timeoutSeconds: {{ .Values.probes.liveness.timeoutSeconds }} + periodSeconds: {{ .Values.probes.liveness.periodSeconds }} + failureThreshold: {{ .Values.probes.liveness.failureThreshold }} + successThreshold: {{ .Values.probes.liveness.successThreshold }} + readinessProbe: + exec: + command: + - /anchore-ecs-inventory + - version + timeoutSeconds: {{ .Values.probes.readiness.timeoutSeconds }} + periodSeconds: {{ .Values.probes.readiness.periodSeconds }} + failureThreshold: {{ .Values.probes.readiness.failureThreshold }} + successThreshold: {{ .Values.probes.readiness.successThreshold }} + {{- with .Values.resources }} + resources: {{ toYaml . 
| nindent 12 }} + {{- end }} + volumeMounts: + - name: config-volume + mountPath: /etc/xdg/anchore-ecs-inventory/config.yaml + subPath: config.yaml + {{- if .Values.ecsInventory.logFile }} + - name: logs + mountPath: {{ dir .Values.ecsInventory.logFile }} + {{- end }} + envFrom: + {{- if not .Values.injectSecretsViaEnv }} + - secretRef: + name: {{ default (include "ecsInventory.fullname" .) .Values.existingSecretName }} + {{- end }} + env: + {{- with .Values.extraEnv }} + {{- toYaml . | nindent 10 }} + {{- end }} + + volumes: + {{- if .Values.ecsInventory.logFile }} + - name: logs + emptyDir: {} + {{- end }} + - name: config-volume + configMap: + name: {{ include "ecsInventory.fullname" . }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/stable/ecs-inventory/templates/secrets.yaml b/stable/ecs-inventory/templates/secrets.yaml new file mode 100644 index 00000000..ed5d0912 --- /dev/null +++ b/stable/ecs-inventory/templates/secrets.yaml @@ -0,0 +1,15 @@ +# only create the secret if injectSecret is false AND existingSecrets is false + +{{- if and (not .Values.injectSecretsViaEnv ) (not .Values.useExistingSecret) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "ecsInventory.fullname" . }} + labels: + {{- include "ecsInventory.labels" . 
| nindent 4 }} +type: Opaque +stringData: + ANCHORE_ECS_INVENTORY_ANCHORE_PASSWORD: {{ .Values.ecsInventory.anchorePassword | quote }} + AWS_ACCESS_KEY_ID: {{ .Values.ecsInventory.awsAccessKeyId | quote }} + AWS_SECRET_ACCESS_KEY: {{ .Values.ecsInventory.awsSecretAccessKey | quote }} +{{- end -}} diff --git a/stable/ecs-inventory/values.yaml b/stable/ecs-inventory/values.yaml new file mode 100644 index 00000000..b7b4fb54 --- /dev/null +++ b/stable/ecs-inventory/values.yaml @@ -0,0 +1,172 @@ +################################################### +## @section Common Resource Parameters +## Common params used by all ECS Inventory resources +################################################### + +## @param replicaCount Number of replicas for the Ecs Inventory deployment +## +replicaCount: 1 + +## @param image Image used for all Ecs Inventory deployment deployments +## use docker.io/anchore/ecs-inventory:v1.3.0-fips-amd64 if you want an image built for fips use +## +image: "docker.io/anchore/ecs-inventory:v1.3.0" + +## @param imagePullPolicy Image pull policy used by all deployments +## ref: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy +## +imagePullPolicy: "IfNotPresent" + +## @param imagePullSecretName Name of Docker credentials secret for access to private repos +## Secrets must be manually created in the same namespace as release +## +imagePullSecretName: "" + +## @param serviceAccountName Name of a service account used to run all Anchore Ecs Inventory pods +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccountName: "" + + +## @param useExistingSecret set to true to use an existing/precreated secret +## +useExistingSecret: false + +## @param existingSecretName the name of the precreated secret +## +existingSecretName: "" + +## @param injectSecretsViaEnv Enable secret injection into pod environment variables +## +injectSecretsViaEnv: false + +## @param extraEnv extra environment 
variables. These will be set on all containers. +### - name: foo +### value: bar +## +extraEnv: [] + +## @param annotations Common annotations set on all Kubernetes resources +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +annotations: {} + +## @param deploymentAnnotations annotations to set on the ecs-inventory deployment +## +deploymentAnnotations: {} + +## @param securityContext.runAsUser The securityContext runAsUser for all Anchore ECS Inventory pods +## @param securityContext.runAsGroup The securityContext runAsGroup for all Anchore ECS Inventory pods +## @param securityContext.fsGroup The securityContext fsGroup for all Anchore ECS Inventory pods +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod +## +securityContext: + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + +## @param resources Resource requests and limits for Anchore ECS Inventory pods +## +resources: {} + +## @param nodeSelector Node labels for pod assignment +## +nodeSelector: {} + +## @param tolerations Tolerations for pod assignment +## +tolerations: [] + +## @param affinity Affinity for pod assignment +## +affinity: {} + +## @param labels Adds additional labels to all kubernetes resources +## +labels: {} + +## @param probes.liveness.initialDelaySeconds Initial delay seconds for liveness probe +## @param probes.liveness.timeoutSeconds Timeout seconds for liveness probe +## @param probes.liveness.periodSeconds Period seconds for liveness probe +## @param probes.liveness.failureThreshold Failure threshold for liveness probe +## @param probes.liveness.successThreshold Success threshold for liveness probe +## @param probes.readiness.timeoutSeconds Timeout seconds for the readiness probe +## @param probes.readiness.periodSeconds Period seconds for the readiness probe +## @param probes.readiness.failureThreshold Failure threshold for the readiness probe +## @param 
probes.readiness.successThreshold Success threshold for the readiness probe +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes +probes: + liveness: + initialDelaySeconds: 1 + timeoutSeconds: 10 + periodSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + readiness: + timeoutSeconds: 10 + periodSeconds: 15 + failureThreshold: 3 + successThreshold: 1 + + +###################################### +## @section ecsInventory Parameters ## +###################################### + +ecsInventory: + + ## @param ecsInventory.quiet Determine whether or not to log the inventory report to stdout + ## + quiet: false + + ## @param ecsInventory.output The output format of the report (options: table, json) + ## + output: "json" + + ## @param ecsInventory.logLevel the level of verbosity for logs + ## + logLevel: "info" + + ## @param ecsInventory.logFile location to write the log file (default is not to have a log file) + ## + logFile: "" + + ## @param ecsInventory.pollingIntervalSeconds The polling interval of the ECS API in seconds + ## + pollingIntervalSeconds: 60 + + ## @param ecsInventory.anchoreUrl the url of the anchore platform + ## + anchoreUrl: "http://localhost:8228" + + ## @param ecsInventory.anchoreAccount the account of the anchore platform + ## + anchoreAccount: "admin" + + ## @param ecsInventory.anchoreUser the username of the anchore platform + ## + anchoreUser: "admin" + + ## @param ecsInventory.anchorePassword the password of the anchore platform + ## + anchorePassword: "foobar" + + ## @param ecsInventory.anchoreHttpInsecure whether or not anchore is using ssl/tls + ## + anchoreHttpInsecure: true + + ## @param ecsInventory.anchoreHttpTimeoutSeconds the amount of time in seconds before timing out + ## + anchoreHttpTimeoutSeconds: 10 + + ## @param ecsInventory.awsAccessKeyId the AWS Access Key ID + ## + awsAccessKeyId: "foobar" + + ## @param ecsInventory.awsSecretAccessKey the AWS Secret Access 
Key + ## + awsSecretAccessKey: "foobar" + + ## @param ecsInventory.awsRegion the AWS Region + ## + awsRegion: "us-west-2" diff --git a/stable/enterprise/.helmignore b/stable/enterprise/.helmignore new file mode 100644 index 00000000..f188d49f --- /dev/null +++ b/stable/enterprise/.helmignore @@ -0,0 +1,2 @@ +.git +tests/ diff --git a/stable/enterprise/Chart.lock b/stable/enterprise/Chart.lock new file mode 100644 index 00000000..605b4775 --- /dev/null +++ b/stable/enterprise/Chart.lock @@ -0,0 +1,12 @@ +dependencies: +- name: postgresql + repository: oci://registry-1.docker.io/bitnamicharts + version: 12.5.9 +- name: redis + repository: oci://registry-1.docker.io/bitnamicharts + version: 17.11.8 +- name: feeds + repository: https://charts.anchore.io/stable + version: 2.4.1 +digest: sha256:fc8274f1cbd805dc79cecefdcb4bb9cddbb7e4863ed738aee9ccdb60611d939b +generated: "2024-04-10T17:13:17.54518-04:00" diff --git a/stable/enterprise/Chart.yaml b/stable/enterprise/Chart.yaml new file mode 100644 index 00000000..8c18ec0c --- /dev/null +++ b/stable/enterprise/Chart.yaml @@ -0,0 +1,50 @@ +apiVersion: v2 +name: enterprise +version: "2.5.2" +appVersion: "5.4.0" +kubeVersion: 1.23.x - 1.28.x || 1.23.x-x - 1.29.x-x +description: | + Anchore Enterprise is a complete container security workflow solution for professional teams. Easily integrating with CI/CD systems, + it allows developers to bolster security without compromising velocity and enables security teams to audit and verify compliance in real-time. + It is based on Anchore Engine, an open-source image inspection and scanning tool. 
+keywords: + - analysis + - docker + - anchore + - "anchore-engine" + - "anchore-enterprise" + - image + - security + - vulnerability + - scanner +home: https://anchore.com +sources: + - https://github.com/anchore/anchore-charts/tree/master/stable/enterprise +maintainers: + - name: zhill + email: zach@anchore.com + - name: btodhunter + email: bradyt@anchore.com + - name: hnguyen + email: hung.nguyen@anchore.com +icon: https://anchore.com/wp-content/uploads/2016/08/anchore.png +dependencies: + - name: postgresql + version: "~12.5" + repository: "oci://registry-1.docker.io/bitnamicharts" + condition: postgresql.chartEnabled + - name: redis + version: "~17.11" + repository: "oci://registry-1.docker.io/bitnamicharts" + condition: ui-redis.chartEnabled + alias: ui-redis + - name: feeds + version: "~2" + repository: "@anchore" + # repository: file://../feeds + condition: feeds.chartEnabled + import-values: + - child: service + parent: feeds.service + - child: anchoreConfig.internalServicesSSL + parent: feeds.anchoreConfig.internalServicesSSL diff --git a/stable/enterprise/README.md b/stable/enterprise/README.md new file mode 100644 index 00000000..2215a3a3 --- /dev/null +++ b/stable/enterprise/README.md @@ -0,0 +1,1411 @@ +# Anchore Enterprise Helm Chart + +> :exclamation: **Important:** View the **[Chart Release Notes](#release-notes)** for the latest changes prior to installation or upgrading. + +This Helm chart deploys Anchore Enterprise on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Anchore Enterprise is a software bill of materials (SBOM)-powered software supply chain management solution designed for a cloud-native world. It provides continuous visibility into supply chain security risks. 
Anchore Enterprise takes a developer-friendly approach that minimizes friction by embedding automation into development toolchains to generate SBOMs and accurately identify vulnerabilities, malware, misconfigurations, and secrets for faster remediation. + +See the [Anchore Enterprise Documentation](https://docs.anchore.com) for more details. + +## Table of Contents + +- [Prerequisites](#prerequisites) +- [Installing the Chart](#installing-the-chart) +- [Installing on Openshift](#installing-on-openshift) +- [Upgrading](#upgrading-the-chart) +- [Uninstalling the Chart](#uninstalling-the-chart) +- [Configuration](#configuration) + - [External Database Requirements](#external-database-requirements) + - [Installing on Openshift](#installing-on-openshift) + - [Enterprise Feeds Configuration](#enterprise-feeds-configuration) + - [Analyzer Image Layer Cache Configuration](#analyzer-image-layer-cache-configuration) + - [Configuring Object Storage](#configuring-object-storage) + - [Configuring Analysis Archive Storage](#configuring-analysis-archive-storage) + - [Existing Secrets](#existing-secrets) + - [Ingress](#ingress) + - [Prometheus Metrics](#prometheus-metrics) + - [Scaling Individual Services](#scaling-individual-services) + - [Using TLS Internally](#using-tls-internally) +- [Migrating to the Anchore Enterprise Helm Chart](#migrating-to-the-anchore-enterprise-helm-chart) +- [Parameters](#parameters) +- [Release Notes](#release-notes) + +## Prerequisites + +- [Helm](https://helm.sh/) >=3.8 +- [Kubernetes](https://kubernetes.io/) >=1.23 + +## Installing the Chart + +> **Note**: For migration steps from an Anchore Engine Helm chart deployment, refer to the [Migrating to the Anchore Enterprise Helm Chart](#migrating-to-the-anchore-enterprise-helm-chart) section. + +This guide covers deploying Anchore Enterprise on a Kubernetes cluster with the default configuration. Refer to the [Configuration](#configuration) section for additional guidance on production deployments. 
+ +1. **Create a Kubernetes Secret for License File**: Generate a Kubernetes secret to store your Anchore Enterprise license file. + + ```shell + export NAMESPACE=anchore + export LICENSE_PATH="license.yaml" + + kubectl create secret generic anchore-enterprise-license --from-file=license.yaml=${LICENSE_PATH} -n ${NAMESPACE} + ``` + +1. **Create a Kubernetes Secret for DockerHub Credentials**: Generate another Kubernetes secret for DockerHub credentials. These credentials should have access to private Anchore Enterprise repositories. We recommend that you create a brand new DockerHub user for these pull credentials. Contact [Anchore Support](https://get.anchore.com/contact/) to obtain access. + + ```shell + export NAMESPACE=anchore + export DOCKERHUB_PASSWORD="password" + export DOCKERHUB_USER="username" + export DOCKERHUB_EMAIL="example@email.com" + + kubectl create secret docker-registry anchore-enterprise-pullcreds --docker-server=docker.io --docker-username=${DOCKERHUB_USER} --docker-password=${DOCKERHUB_PASSWORD} --docker-email=${DOCKERHUB_EMAIL} -n ${NAMESPACE} + ``` + +1. **Add Chart Repository & Deploy Anchore Enterprise**: Create a custom values file, named `anchore_values.yaml`, to override any chart parameters. Refer to the [Parameters](#parameters) section for available options. + + > :exclamation: **Important**: Default passwords are specified in the chart. It's highly recommended to modify these before deploying. + + ```shell + export NAMESPACE=anchore + export RELEASE=my-release + + helm repo add anchore https://charts.anchore.io + helm install ${RELEASE} -n ${NAMESPACE} anchore/enterprise -f anchore_values.yaml + ``` + + > **Note**: This command installs Anchore Enterprise with a chart-managed PostgreSQL database, which may not be suitable for production use. See the [External Database](#external-database-requirements) section for details on using an external database. + +1. 
**Post-Installation Steps**: Anchore Enterprise will take some time to initialize. After the bootstrap phase, it will begin a vulnerability feed sync. Image analysis will show zero vulnerabilities until this sync is complete. This can take several hours based on the enabled feeds. Use the following [anchorectl](https://docs.anchore.com/current/docs/deployment/anchorectl/) commands to check the system status: + + ```shell + export NAMESPACE=anchore + export RELEASE=my-release + export ANCHORECTL_URL=http://localhost:8228/v1/ + export ANCHORECTL_PASSWORD=$(kubectl get secret -n ${NAMESPACE} "${RELEASE}-enterprise" -o jsonpath='{.data.ANCHORE_ADMIN_PASSWORD}' | base64 -d -) + + kubectl port-forward -n ${NAMESPACE} svc/${RELEASE}-enterprise-api 8228:8228 # port forward for anchorectl in another terminal + anchorectl system status # anchorectl defaults to the user admin, and to the password ${ANCHORECTL_PASSWORD} automatically if set + ``` + + > **Tip**: List all releases using `helm list` + +### Installing on Openshift + +As of August 2, 2023, Helm does not offer native support for passing `null` values to child or dependency charts. For details, refer to this [Helm GitHub issue](https://github.com/helm/helm/issues/9027). Given that the `feeds` chart is a dependency, a workaround is to deploy it as a standalone chart and configure the `enterprise` deployment to point to this separate `feeds` deployment. + +Additionally, be aware that you'll need to either disable or properly set the parameters for `containerSecurityContext`, `runAsUser`, and `fsGroup` for the `ui-redis` and any PostgreSQL database that you deploy using the Enterprise chart (e.g., via `postgresql.chartEnabled` or `feeds-db.chartEnabled`). + +For example: + +1. 
**Deploy feeds chart as a standalone deployment:** + + ```shell + helm install my-release anchore/feeds \ + --set securityContext.fsGroup=null \ + --set securityContext.runAsGroup=null \ + --set securityContext.runAsUser=null \ + --set feeds-db.primary.containerSecurityContext.enabled=false \ + --set feeds-db.primary.podSecurityContext.enabled=false + ``` + +1. **Deploy the enterprise chart with appropriate values:** + + ```shell + helm install anchore anchore/enterprise \ + --set securityContext.fsGroup=null \ + --set securityContext.runAsGroup=null \ + --set securityContext.runAsUser=null \ + --set feeds.chartEnabled=false \ + --set feeds.url=my-release-feeds \ + --set postgresql.primary.containerSecurityContext.enabled=false \ + --set postgresql.primary.podSecurityContext.enabled=false \ + --set ui-redis.master.podSecurityContext.enabled=false \ + --set ui-redis.master.containerSecurityContext.enabled=false + ``` + + > **Note:** disabling the containerSecurityContext and podSecurityContext may not be suitable for production. See [Redhat's documentation](https://docs.openshift.com/container-platform/4.13/authentication/managing-security-context-constraints.html#managing-pod-security-policies) on what may be suitable for production. For more information on the openshift.io/sa.scc.uid-range annotation, see the [openshift docs](https://docs.openshift.com/dedicated/authentication/managing-security-context-constraints.html#security-context-constraints-pre-allocated-values_configuring-internal-oauth) + +#### Example Openshift values file + +```yaml +# NOTE: This is not a production ready values file for an openshift deployment. 
+ +securityContext: + fsGroup: null + runAsGroup: null + runAsUser: null +feeds: + chartEnabled: false + url: my-release-feeds +postgresql: + primary: + containerSecurityContext: + enabled: false + podSecurityContext: + enabled: false +ui-redis: + master: + podSecurityContext: + enabled: false + containerSecurityContext: + enabled: false +``` + +## Upgrading the Chart + +> :exclamation: **Important:** View the **[Chart Release Notes](#release-notes)** for the latest changes prior to upgrading. + +A Helm pre-upgrade hook initiates a Kubernetes job that scales down all active Anchore Enterprise pods and handles the Anchore database upgrade. + +The Helm upgrade is marked as successful only upon the job's completion. This process causes the Helm client to pause until the job finishes and new Anchore Enterprise pods are initiated. To monitor the upgrade, follow the logs of the upgrade jobs. These jobs are automatically removed after a subsequent successful Helm upgrade. + + ```shell + export NAMESPACE=anchore + export RELEASE=my-release + + helm upgrade ${RELEASE} -n ${NAMESPACE} anchore/enterprise -f anchore_values.yaml + ``` + +An optional post-upgrade hook is available to perform Anchore Enterprise upgrades without forcing all pods to terminate prior to running the upgrade. This is the same upgrade behavior that was enabled by default in the legacy anchore-engine chart. To enable the post-upgrade hook, set `upgradeJob.usePostUpgradeHook=true` in your values file. + +## Uninstalling the Chart + +To completely remove the Anchore Enterprise deployment and associated Kubernetes resources, follow the steps below: + + ```shell + export NAMESPACE=anchore + export RELEASE=my-release + + helm uninstall ${RELEASE} -n ${NAMESPACE} + ``` + +After deleting the helm release, there are still a few persistent volume claims to delete. Delete these only if you're certain you no longer need them. 
+ + ```shell + export NAMESPACE=anchore + export RELEASE=my-release + + kubectl get pvc -n ${NAMESPACE} + kubectl delete pvc ${RELEASE}-feeds -n ${NAMESPACE} + kubectl delete pvc ${RELEASE}-feeds-db -n ${NAMESPACE} + kubectl delete pvc ${RELEASE}-postgresql -n ${NAMESPACE} + ``` + +## Configuration + +This section outlines some of the available configuration options for Anchore Enterprise. The default settings are specified in the bundled [values file](https://github.com/anchore/anchore-charts/blob/main/stable/enterprise/values.yaml). To customize these settings, create your own `anchore_values.yaml` file and populate it with the configuration options you wish to override. To apply your custom configuration during installation, pass your custom values file to the `helm install` command: + +```shell +export NAMESPACE=anchore +export RELEASE="my-release" + +helm install ${RELEASE} -n ${NAMESPACE} anchore/enterprise -f custom_values.yaml +``` + +For additional guidance on customizing your Anchore Enterprise deployment, reach out to [Anchore Support](https://get.anchore.com/contact/). + +### External Database Requirements + +Anchore Enterprise requires the use of a PostgreSQL-compatible database version 13 or above. For production environments, leveraging managed database services like AWS RDS or Google Cloud SQL is advised. While the Helm chart includes a chart-managed database by default, you can override this setting to use an external database. + +For optimal performance, allocate a minimum of 100GB storage to accommodate images, tags, subscriptions, policies, and other data entities. Furthermore, configure the database to support a minimum of 2,000 client connections. This limit may need to be adjusted upward if you're running more Anchore services than the default configuration. 
+ +#### External Postgres Database Configuration + +```yaml +postgresql: + chartEnabled: false + auth.password: + auth.username: + auth.database: + externalEndpoint: + +anchoreConfig: + database: + ssl: true + sslMode: require +``` + +#### RDS Postgres Database Configuration With TLS + +To obtain a comprehensive AWS RDS PostgreSQL certificate bundle, which includes both intermediate and root certificates for all AWS regions, you can download it [here](https://truststore.pki.rds.amazonaws.com/global/global-bundle.pem). An example of creating the certificate secret can be found in [TLS Configuration](#using-tls-internally). + +```yaml +postgresql: + chartEnabled: false + auth.password: + auth.username: + auth.database: + externalEndpoint: + +certStoreSecretName: some-cert-store-secret + +anchoreConfig: + database: + ssl: true + sslMode: verify-full + # sslRootCertName is the name of the Postgres root CA certificate stored in certStoreSecretName + sslRootCertFileName: postgres-root-ca-cert +``` + +#### Google CloudSQL Database Configuration + +```yaml +## anchore_values.yaml +postgresql: + chartEnabled: false + auth.password: + auth.username: + auth.database: + +cloudsql: + enabled: true + instance: "project:zone:instancename" + # Optional existing service account secret to use. See https://cloud.google.com/sql/docs/postgres/authentication + useExistingServiceAcc: true + # If using an existing Service Account, you must create a secret (named my_service_acc in the example below) + # which includes the JSON token from Google's IAM (corresponding to for_cloudsql.json in the example below) + serviceAccSecretName: my_service_acc + serviceAccJsonName: for_cloudsql.json +``` + +### Enterprise Feeds Configuration + +The Anchore Enterprise Feeds service is provided as a dependent [Helm chart](https://github.com/anchore/anchore-charts/tree/main/stable/feeds). This service is comprised of different drivers for different vulnerability feeds. 
The drivers can be configured separately, and some drivers require a token or other credential. + +See the [Anchore Enterprise Feeds](https://docs.anchore.com/current/docs/configuration/feeds/) documentation for details. + +```yaml +feeds: + anchoreConfig: + feeds: + drivers: + github: + enabled: true + # The GitHub feeds driver requires a GitHub developer personal access token with no permission scopes selected. + # See https://docs.github.com/en/github/authenticating-to-github/keeping-your-account-and-data-secure/creating-a-personal-access-token + token: your-github-token + msrc: + enabled: true +``` + +#### Enterprise Feeds External Database Configuration + +Anchore Enterprise Feeds requires the use of a PostgreSQL-compatible database version 13 or above. This database is distinct from the primary Anchore Enterprise database. For production environments, leveraging managed database services like AWS RDS or Google Cloud SQL is advised. While the Helm chart includes a chart-managed database by default, you can override this setting to use an external database. + +See previous [examples](#external-database-requirements) of configuring RDS Postgresql and Google CloudSQL. + +```yaml +feeds: + anchoreConfig: + database: + ssl: true + sslMode: require + + feeds-db: + enabled: false + auth.password: + auth.username: + auth.database: + externalEndpoint: +``` + +### Analyzer Image Layer Cache Configuration + +To improve performance, the Anchore Enterprise Analyzer can be configured to cache image layers. This can be particularly helpful if many images analyzed are built from the same set of base images. + +It is recommended that layer cache data is stored in an external volume to ensure that the cache does not use all of the ephemeral storage allocated for an analyzer host. See [Anchore Enterprise Layer Caching](https://docs.anchore.com/current/docs/configuration/storage/layer_caching/) documentation for details. 
Refer to the default values file for configuring the analysis scratch volume. + +```yaml +anchoreConfig: + analyzer: + layer_cache_max_gigabytes: 6 +``` + +### Configuring Object Storage + +Anchore Enterprise utilizes an object storage system to persistently store metadata related to images, tags, policies, and subscriptions. + +#### Configuring The Object Storage Backend + +In addition to a database (Postgres) storage backend, Anchore Enterprise object storage drivers also support S3 and Swift storage. This enables scalable external object storage without burdening Postgres. + +> **Note:** Using external object storage is recommended for production usage. + +- [Database backend](https://docs.anchore.com/current/docs/configuration/storage/object_store/database_driver/): Postgres database backend; this is the default, so using Postgres as the analysis archive storage backend requires no additional configuration +- [Local FS backend](https://docs.anchore.com/current/docs/configuration/storage/object_store/filesystem_driver/): A local filesystem on the core pod (Does not handle sharding or replication; generally recommended only for testing) +- [OpenStack Swift backend](https://docs.anchore.com/current/docs/configuration/storage/object_store/swift_driver/) +- [S3 backend](https://docs.anchore.com/current/docs/configuration/storage/object_store/s3_driver/): Any AWS S3 API compatible system (e.g. MinIO, Scality) + +### Configuring Analysis Archive Storage + +The Analysis Archive subsystem within Anchore Enterprise is designed to store extensive JSON documents, potentially requiring significant storage capacity based on the number of images analyzed. As a general guideline, allocate approximately 10MB of storage per analyzed image. Consequently, analyzing thousands of images could necessitate gigabytes of storage space. The Analysis Archive subsystem offers configurable options for both data compression and selection of the storage backend. 
+ +Configuration of external analysis archive storage is essentially identical to configuration of external object storage. See [Anchore Enterprise Analysis Archive](https://docs.anchore.com/current/docs/configuration/storage/analysis_archive/) documentation for details. + +> **Note:** Using external analysis archive storage is recommended for production usage. + +### Existing Secrets + +For deployments where version-controlled configurations are essential, it's advised to avoid storing credentials directly in values files. Instead, manually create Kubernetes secrets and reference them as existing secrets within your values files. When using existing secrets, the chart will load environment variables into deployments from the secret names specified by the following values: + +- `.Values.existingSecretName` [default: anchore-enterprise-env] +- `.Values.feeds.existingSecretName` [default: anchore-enterprise-feeds-env] +- `.Values.ui.existingSecretName` [default: anchore-enterprise-ui-env] + +To enable this feature, set the following values to `true` in your values file: + +```yaml +useExistingSecrets: true + +feeds: + useExistingSecrets: true +``` + +Below are sample Kubernetes secret objects and corresponding guidelines on integrating them into your Anchore Enterprise configuration. 
+
+```yaml
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: anchore-enterprise-env
+type: Opaque
+stringData:
+  ANCHORE_ADMIN_PASSWORD: foobar1234
+  ANCHORE_DB_NAME: anchore
+  ANCHORE_DB_USER: anchore
+  ANCHORE_DB_HOST: anchore-postgresql
+  ANCHORE_DB_PORT: "5432"
+  ANCHORE_DB_PASSWORD: anchore-postgres,123
+  # (if applicable) ANCHORE_SAML_SECRET: foobar,saml1234
+
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: anchore-enterprise-ui-env
+type: Opaque
+stringData:
+  # if using TLS to connect to Postgresql you must add the ?ssl=[require|verify-ca|verify-full] parameter to the end of the URI
+  ANCHORE_APPDB_URI: postgresql://anchoreengine:anchore-postgres,123@anchore-postgresql:5432/anchore
+  ANCHORE_REDIS_URI: redis://:anchore-redis,123@anchore-ui-redis-master:6379
+
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: anchore-enterprise-feeds-env
+  labels:
+    app: anchore
+type: Opaque
+stringData:
+  ANCHORE_ADMIN_PASSWORD: foobar1234
+  ANCHORE_FEEDS_DB_NAME: anchore-feeds
+  ANCHORE_FEEDS_DB_USER: anchoreengine
+  ANCHORE_FEEDS_DB_PASSWORD: anchore-postgres,123
+  ANCHORE_FEEDS_DB_HOST: anchore-enterprise-feeds-db
+  ANCHORE_FEEDS_DB_PORT: "5432"
+  # (if applicable) ANCHORE_SAML_SECRET: foobar,saml1234
+  # (if applicable) ANCHORE_GITHUB_TOKEN: foobar,github1234
+  # (if applicable) ANCHORE_NVD_API_KEY: foobar,nvd1234
+  # (if applicable) ANCHORE_GEM_DB_NAME: anchore-gems
+  # (if applicable) ANCHORE_GEM_DB_USER: anchoregemsuser
+  # (if applicable) ANCHORE_GEM_DB_PASSWORD: foobar1234
+  # (if applicable) ANCHORE_GEM_DB_HOST: anchorefeeds-gem-db.example.com:5432
+```
+
+### Ingress
+
+[Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) serves as the gateway to expose HTTP and HTTPS routes from outside the Kubernetes cluster to services within it. Routing is governed by rules specified in the Ingress resource. Kubernetes supports a variety of ingress controllers, such as AWS ALB and GCE controllers.
+ +This Helm chart includes a foundational ingress configuration that is customizable. You can expose various Anchore Enterprise external APIs, including the core API, UI, reporting, and feeds, by editing the `ingress` section in your values file. + +Ingress is disabled by default in this Helm chart. To enable it, along with the [NGINX ingress controller](https://kubernetes.github.io/ingress-nginx/) for core API and UI routes, set the `ingress.enabled` value to `true`. + +```yaml +ingress: + enabled: true +``` + +#### ALB Ingress Controller + +The [Kubernetes ALB ingress controller](https://docs.aws.amazon.com/eks/latest/userguide/alb-ingress.html) must be installed into the cluster for this configuration to work. + +```yaml +ingress: + enabled: true + annotations: + alb.ingress.kubernetes.io/scheme: internet-facing + ingressClassName: alb + + apiHosts: + - anchore-api.example.com + uiHosts: + - anchore-ui.example.com + feedsHosts: + - anchore-feeds.example.com + +api: + service: + type: NodePort + +feeds: + service: + type: NodePort + +ui: + service: + type: NodePort +``` + +#### GCE Ingress Controller + +The [Kubernetes GCE ingress controller](https://cloud.google.com/kubernetes-engine/docs/concepts/ingress) must be installed into the cluster for this configuration to work. + +```yaml +ingress: + enabled: true + ingressClassName: gce + apiPaths: + - /v1/* + - /v2/* + - /version/* + feedsPaths: + - /v1/feeds/* + - /v2/feeds/* + uiPath: /* + + apiHosts: + - anchore-api.example.com + uiHosts: + - anchore-ui.example.com + feedsHosts: + - anchore-feeds.example.com + +api: + service: + type: NodePort + +feeds: + service: + type: NodePort + +ui: + service: + type: NodePort +``` + +### Prometheus Metrics + +Anchore Enterprise offers native support for exporting Prometheus metrics from each of its containers. When this feature is enabled, each service exposes metrics via its existing service port. 
If you're adding Prometheus manually to your deployment, you'll need to configure it to recognize each pod and its corresponding ports. + +```yaml +anchoreConfig: + metrics: + enabled: true + auth_disabled: true +``` + +For those using the [Prometheus operator](https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/getting-started.md), a ServiceMonitor can be deployed within the same namespace as your Anchore Enterprise release. Once deployed, the Prometheus operator will automatically begin scraping the pre-configured endpoints for metrics. + +#### Example ServiceMonitor Configuration + +The `targetPort` values in this example use the default Anchore Enterprise service ports. + +You will require a ServiceAccount for Prometheus (referenced in the Prometheus configuration below). + +```yaml +--- +apiVersion: monitoring.coreos.com/v1 +kind: Prometheus +metadata: + name: prometheus + namespace: + labels: + prometheus: prometheus +spec: + replicas: 1 + serviceAccountName: prometheus + serviceMonitorSelector: + matchLabels: + serviceMonitorName: anchore + +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: anchore-prom-metrics + namespace: + labels: + serviceMonitorName: anchore # from Prometheus configuration above +spec: + namespaceSelector: + matchNames: + - + selector: + matchLabels: + app.kubernetes.io/instance: + endpoints: + # api + - targetPort: 8228 + interval: 30s + path: /metrics + scheme: http + # catalog + - targetPort: 8082 + interval: 30s + path: /metrics + scheme: http + # policy engine + - targetPort: 8087 + interval: 30s + path: /metrics + scheme: http + # simple queue + - targetPort: 8083 + interval: 30s + path: /metrics + scheme: http + # feeds + - targetPort: 8448 + interval: 30s + path: /metrics + scheme: http + # reports + - targetPort: 8558 + interval: 30s + path: /metrics + scheme: http + # notifications + - targetPort: 8668 + interval: 30s + path: /metrics + scheme: http 
+``` + +### Scaling Individual Services + +Anchore Enterprise services can be scaled by adjusting replica counts: + +```yaml +analyzer: + replicaCount: 5 + +policyEngine: + replicaCount: 3 +``` + +> **Note:** Contact [Anchore Support](https://get.anchore.com/contact/) for assistance in scaling and tuning your Anchore Enterprise installation. + +### Using TLS Internally + +Anchore Enterprise supports TLS for secure communication between its services. For detailed configuration steps, refer to the [Anchore TLS documentation](https://docs.anchore.com/current/docs/configuration/tls_ssl/). + +To implement this, create a Kubernetes secret in the same namespace where the Helm chart is installed. This secret should encapsulate all custom certificates, including CA certificates and those used for internal TLS communication. + +The Kubernetes secret will be mounted into all Anchore Enterprise containers at the path `/home/anchore/certs`. Anchore Enterprise's entrypoint script will auto-configure all certificates located in this directory, supplementing them with the operating system's default CA bundle. + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: anchore-tls-certs + namespace: ... 
+type: Opaque +data: + rds-combined-ca-cert-bundle.pem: +[base64 encoded text] + internal-cert.pem: +[base64 encoded text] + internal-cert-key.pem: +[base64 encoded text] + ldap-combined-ca-cert-bundle.pem: +[base64 encoded text] +``` + +Values configuration corresponding to above example secret: + +```yaml +certStoreSecretName: anchore-tls-certs + +anchoreConfig: + database: + timeout: 120 + ssl: true + sslMode: verify-full + # sslRootCertName is the name of the Postgres root CA certificate stored in certStoreSecretName + sslRootCertFileName: rds-combined-ca-cert-bundle.pem + + internalServicesSSL: + enabled: true + # Specify whether cert is verified against the local certificate bundle (If set to false, self-signed certs are allowed) + verifyCerts: true + certSecretKeyFileName: internal-cert-key.pem + certSecretCertFileName: internal-cert.pem + +ui: + # Specify an LDAP CA cert if using LDAP authenication. + # Note if using an internal ca cert for internalServicesSSL, combine that into the ldap-combined-ca-cert-bundle.pem + ldapsRootCaCertName: ldap-combined-ca-cert-bundle.pem +``` + +## Migrating to the Anchore Enterprise Helm Chart + +This guide provides steps for transitioning from an Anchore Engine Helm chart deployment to the updated Anchore Enterprise Helm chart, a necessary step for users planning to upgrade to Anchore Enterprise version v5.0.0 or later. + + > :warning: **Warning**: The values file used by the Anchore Enterprise Helm chart is different from the one used by the Anchore Engine Helm chart. Make sure to convert your existing values file accordingly. + +A [migration script](https://github.com/anchore/anchore-charts/tree/main/scripts) is available to automate the conversion of your Anchore Engine values file to the new Enterprise format. A usage example is provided below. + +### Migration Prerequisites + +- **Anchore Version**: Ensure that your current deployment is running Anchore Enterprise version 4.9.x (but not v5.0.0+). 
This is required to ensure that the migration script can properly convert your values file. + + > **Note:** Upgrade your [anchore-engine](https://github.com/anchore/anchore-charts/tree/main/stable/anchore-engine) chart deployment to `v1.28.0` or higher to ensure that you're running Anchore Enterprise v4.9.x. + +- **PostgreSQL Version**: You need PostgreSQL version 13 or higher. For upgrading your existing PostgreSQL installation, refer to the official [PostgreSQL documentation](https://www.postgresql.org/docs/13/upgrading.html). Database migration help for helm managed PostgreSQL deployments is provided below. + + > **Note:** This chart deploys PostgreSQL 13 by default. + +- **Runtime Environment**: Docker or Podman must be installed on the machine where the migration will run. + +### Expected Changes to Your Deployment + +The Anchore Enterprise Helm chart introduces several changes to the deployment compared to the Anchore Engine chart deployment. These changes are outlined below. + +#### Service Names + +- All service names have been updated to follow the Enterprise naming convention: + - `-anchore-engine-api` -> `-enterprise-api` + - `-anchore-engine-catalog` -> `-enterprise-catalog` + - `-anchore-engine-enterprise-feeds` -> `-feeds` + - `-anchore-engine-enterprise-notifications` -> `-enterprise-notifications` + - `-anchore-engine-enterprise-reports` -> `-enterprise-reports` + - `-anchore-engine-enterprise-ui` -> `-enterprise-ui` + - `-anchore-engine-policy` -> `-enterprise-policy` + - `-anchore-engine-simplequeue` -> `-enterprise-simplequeue` + +#### Labels, Annotations & Selectors + +- Standard Kubernetes labels and annotations replace the custom ones used in Anchore Engine: + - `component` -> `app.kubernetes.io/component` + - `release` -> `app.kubernetes.io/instance` + - `app` -> `app.kubernetes.io/name` + - `chart` -> `helm.sh/chart` + +#### Dependent Services + +- The Feeds service is now deployed as a dependent chart, it can be configured using the [Feeds 
Values](https://github.com/anchore/anchore-charts/blob/main/stable/feeds/values.yaml) +- The bundled PostgreSQL chart has been replaced with the Bitnami PostgreSQL Chart as a dependency. Configuration options can be found in the [Postgresql Values](https://github.com/bitnami/charts/blob/main/bitnami/postgresql/values.yaml). + +#### Upgrade Behavior + +- Pre-upgrade Helm hooks, along with a Bitnami/kubectl init container, are used to terminate all pods before running the Anchore upgrade. You can revert to legacy post-upgrade hooks by setting `upgradeJob.usePostUpgradeHook=true`. + +#### Application Configuration + +- Configuration is now primarily managed through environment variables, specified in the `-enterprise-config-env-vars` ConfigMap and set via the values file. +- Previously, unexposed values for advanced Anchore configurations have been removed. Instead, you can use the `extraEnv` value to set the required environment variables. + +### Migration Rollback Strategy + +The migration employs a blue/green deployment strategy to minimize risk and facilitate easy rollback. Should you encounter issues during the migration, reverting to the prior state is straightforward: simply scale your Anchore-Engine deployment back up. + +For those using an external PostgreSQL database without the benefit of a blue/green deployment strategy, a manual database restoration is necessary. Utilize a pre-migration backup to restore the database to its previous version, and then proceed to scale your Anchore-Engine deployment back up. + +See the [Migration Rollback Steps](#migration-rollback-steps) section for more details. + +### Step-by-Step Migration Process + +1. **Upgrade Existing Anchore Engine Deployment**: Upgrade your existing Anchore Engine deployment to chart version 1.28.0 or higher. This will ensure that your deployment is running Anchore Enterprise v4.9.x. 
+ + ```shell + export NAMESPACE=anchore + export ENGINE_RELEASE= # Existing Engine release name + export VALUES_FILE_NAME=my-values-file.yaml # Existing Engine chart values file + + helm repo update + helm upgrade ${ENGINE_RELEASE} -n ${NAMESPACE} anchore/anchore-engine -f ${VALUES_FILE_NAME} --version=^1.28.0 + ``` + +1. **Generate a New Enterprise Values File**: Use the migration script to convert your existing Anchore Engine values file to the new Anchore Enterprise format. + + >**Note**: This command mounts a local volume to persistently store the output files, it also mounts the input values file within the container for conversion. It's critical to review both the output logs and the new `output/enterprise.values.yaml` file before moving forward. + + ```shell + export VALUES_FILE_NAME=my-values-file.yaml # Existing Engine chart values file + + docker run -v ${PWD}:/tmp -v ${PWD}/${VALUES_FILE_NAME}:/app/${VALUES_FILE_NAME} docker.io/anchore/enterprise-helm-migrator:latest -e /app/${VALUES_FILE_NAME} -d /tmp/output + ``` + +### If Using an External PostgreSQL Database + +1. **Scale Down Anchore Engine**: To avoid data inconsistency, scale down your existing Anchore Engine deployment to zero replicas. + + ```shell + export NAMESPACE=anchore + export ENGINE_RELEASE= + + kubectl scale deployment --replicas=0 -l app=${ENGINE_RELEASE}-anchore-engine -n ${NAMESPACE} + ``` + +1. **Perform database backup**: Backup your external database. See the official [PostgreSQL documentation](https://www.postgresql.org/docs/13/backup.html) for guidance. If using a managed cloud database service refer to their documentation. + +1. **Perform database upgrade**: Upgrade your external database. See the official [PostgreSQL documentation](https://www.postgresql.org/docs/13/upgrading.html) for guidance. If using a managed cloud database service refer to their documentation. 
+ + > Tip: Leveraging a backup to instantiate a new database instance enables a non-intrusive database upgrade and Enterprise chart migration. This approach preserves the integrity of your original database. By adopting a blue/green deployment strategy for the migration, you gain the advantage of effortless rollbacks in case of migration-related issues. + +1. **(Optional) Update Database Hostname**: If you're employing a blue/green deployment strategy for the database upgrade, update the database hostname in your values file and/or existing Kubernetes secrets to point to your newly created database instance. This step is essential for properly configuring the Enterprise chart to use the new database. + +1. **Deploy Anchore Enterprise**: Use the converted values file to deploy the new Anchore Enterprise Helm chart. + + >**Note:** If you are **not using existing secrets**, you will need to uncomment the `ADMIN_PASS` and `SET_ADMIN_PASS` export commands below. This is needed to ensure that your Enterprise deployment stores the correct Anchore admin password in the secret. + + ```shell + export NAMESPACE=anchore + export ENTERPRISE_RELEASE= + export ENTERPRISE_VALUES_FILE=${PWD}/output/enterprise.my-values-file.yaml + + # If you are not using existing secrets, uncomment the following export commands + # + # export ADMIN_PASS=$(kubectl get secret -n ${NAMESPACE} ${ENGINE_RELEASE}-anchore-engine-admin-pass -o jsonpath="{.data.ANCHORE_ADMIN_PASSWORD}" | base64 -d -) + # export SET_ADMIN_PASS=("--set" "anchoreConfig.default_admin_password=${ADMIN_PASS}") + + helm install ${ENTERPRISE_RELEASE} -n ${NAMESPACE} ${SET_ADMIN_PASS[@]} -f ${ENTERPRISE_VALUES_FILE} anchore/enterprise --version=^1.0.0 + ``` + +1. **Verification and Cleanup**: After confirming that the Anchore Enterprise deployment is functional, you can safely uninstall the old Anchore Engine deployment. 
+
+   ```shell
+   export NAMESPACE=anchore
+   export ENGINE_RELEASE=
+
+   helm uninstall ${ENGINE_RELEASE} -n ${NAMESPACE}
+   ```
+
+   You may now have old engine persistent volume claims to delete. Delete these only when you are confident with the state of your new Enterprise Chart deployment.
+
+   ```shell
+   export NAMESPACE=anchore
+   export ENGINE_RELEASE=
+
+   kubectl get pvc -n ${NAMESPACE}
+   kubectl delete pvc ${ENGINE_RELEASE}-anchore-engine-enterprise-feeds -n ${NAMESPACE}
+   ```
+
+### If Using the Dependent PostgreSQL Chart
+
+1. **Scale Down Anchore Engine**: To avoid data inconsistency, scale down your existing Anchore Engine deployment to zero replicas.
+
+   ```shell
+   export NAMESPACE=anchore
+   export ENGINE_RELEASE=
+
+   kubectl scale deployment --replicas=0 -l app=${ENGINE_RELEASE}-anchore-engine -n ${NAMESPACE}
+   ```
+
+1. **Deploy Anchore Enterprise**: Use the converted values file to deploy the new Anchore Enterprise Helm chart.
+
+   >**Note:** You will have to migrate data from the old database to the new one after the chart is installed. The enterprise chart contains a helper pod to aid with this. This helper pod is enabled using the `startMigrationPod=true` & `migrationAnchoreEngineSecretName=${ENGINE_RELEASE}-anchore-engine` flags in the following command.
+   >
+   > If you **are using existing secrets**, you should ignore setting the `ADMIN_PASS` and `SET_ADMIN_PASS` environment variables.
+ + ```shell + export NAMESPACE=anchore + export ENGINE_RELEASE= + export ENTERPRISE_RELEASE= + export ENTERPRISE_VALUES_FILE=${PWD}/output/enterprise.my-values-file.yaml # The converted file + + # If you are using existing secrets, ignore the following export commands + # + export ADMIN_PASS=$(kubectl get secret -n ${NAMESPACE} ${ENGINE_RELEASE}-anchore-engine-admin-pass -o jsonpath="{.data.ANCHORE_ADMIN_PASSWORD}" | base64 -d -) + export SET_ADMIN_PASS=("--set" "anchoreConfig.default_admin_password=${ADMIN_PASS}") + + helm install ${ENTERPRISE_RELEASE} -n ${NAMESPACE} --set startMigrationPod=true --set migrationAnchoreEngineSecretName=${ENGINE_RELEASE}-anchore-engine ${SET_ADMIN_PASS[@]} anchore/enterprise -f ${ENTERPRISE_VALUES_FILE} --version=^1.0.0 + ``` + +1. **Scale Down Anchore Enterprise**: Before migrating the database, scale down the new Anchore Enterprise deployment to zero replicas. + + ```shell + export NAMESPACE=anchore + export ENTERPRISE_RELEASE= + + kubectl scale deployment -n ${NAMESPACE} --replicas=0 -l app.kubernetes.io/instance=${ENTERPRISE_RELEASE} + ``` + +1. **Database Preparation**: Replace the existing Anchore database schema with a new database schema in the PostgreSQL 13 deployment. If you set `startMigrationPod=true` as per the step above, you can exec into the migrator pod using the following commands: + + ```shell + export NAMESPACE=anchore + export ENTERPRISE_RELEASE= + + kubectl -n ${NAMESPACE} exec -it ${ENTERPRISE_RELEASE}-enterprise-migrate-db -- /bin/bash -c 'PGPASSWORD=${NEW_DB_PASSWORD} dropdb -h ${NEW_DB_HOST} -U ${NEW_DB_USERNAME} ${NEW_DB_NAME}; PGPASSWORD=${NEW_DB_PASSWORD} psql -h ${NEW_DB_HOST} -U ${NEW_DB_USERNAME} -c "CREATE DATABASE ${NEW_DB_NAME}" postgres' + ``` + +1. **Data Migration**: Migrate data from the old Anchore Engine database to the new Anchore Enterprise database. 
+
+   - If you are using the migration helper pod, exec into that pod and perform the database migration using the following commands:
+
+     ```shell
+     export NAMESPACE=anchore
+     export ENTERPRISE_RELEASE=
+
+     kubectl -n ${NAMESPACE} exec -it ${ENTERPRISE_RELEASE}-enterprise-migrate-db -- /bin/bash -c 'PGPASSWORD=${OLD_DB_PASSWORD} pg_dump -h ${OLD_DB_HOST} -U ${OLD_DB_USERNAME} -c ${OLD_DB_NAME} | PGPASSWORD=${NEW_DB_PASSWORD} psql -h ${NEW_DB_HOST} -U ${NEW_DB_USERNAME} ${NEW_DB_NAME}'
+     ```
+
+   - If you are using your own pod, then follow these steps:
+
+     1. Gather old DB parameters from the secret ${OLD_ENGINE_RELEASE}-anchore-engine
+     1. Gather new DB parameters from the new secret ${NEW_ENTERPRISE_RELEASE}-enterprise
+     1. Start a migration pod that has all the psql binaries required e.g. docker.io/postgres:13
+     1. Export all the required environment variables
+
+     ```shell
+     PGPASSWORD=${OLD_DB_PASSWORD} pg_dump -h ${OLD_DB_HOST} -U ${OLD_DB_USERNAME} -c ${OLD_DB_NAME} | PGPASSWORD=${NEW_DB_PASSWORD} psql -h ${NEW_DB_HOST} -U ${NEW_DB_USERNAME} ${NEW_DB_NAME}
+     ```
+
+1. **Upgrade Anchore Enterprise**: After migrating the data, upgrade the Anchore Enterprise Helm deployment.
+
+   ```shell
+   export NAMESPACE=anchore
+   export ENTERPRISE_RELEASE=
+   export ENTERPRISE_VALUES_FILE=${PWD}/output/enterprise.my-values-file.yaml # The converted file
+
+   helm upgrade ${ENTERPRISE_RELEASE} -n ${NAMESPACE} --set startMigrationPod=false anchore/enterprise -f ${ENTERPRISE_VALUES_FILE} --version=^1.0.0
+   ```
+
+1. **Final Verification and Cleanup**: After ensuring the new deployment is operational, uninstall the old Anchore Engine deployment.
+
+   ```shell
+   export NAMESPACE=anchore
+   export ENGINE_RELEASE=
+
+   helm uninstall ${ENGINE_RELEASE} -n ${NAMESPACE}
+   ```
+
+   You may now have old engine persistent volume claims to delete. Delete these only when you are confident with the state of your new Enterprise Chart deployment.
+ + ```shell + export NAMESPACE=anchore + export ENGINE_RELEASE= + + kubectl get pvc -n ${NAMESPACE} + kubectl delete pvc ${ENGINE_RELEASE}-anchore-engine-enterprise-feeds -n ${NAMESPACE} + kubectl delete pvc ${ENGINE_RELEASE}-anchore-feeds-db -n ${NAMESPACE} + kubectl delete pvc ${ENGINE_RELEASE}-postgresql -n ${NAMESPACE} + ``` + +### Migration Rollback Steps + +In case of issues during the migration, execute the following rollback steps: + +1. **Uninstall the Anchore Enterprise Chart**: Remove the Anchore Enterprise deployment from your cluster. +1. **Remove Migrated Values File**: Delete the `output` directory generated by the migration script. +1. **Erase Enterprise Database**: Delete the database associated with the Anchore Enterprise deployment. +1. **(Optional) Restore Anchore-Engine Database**: If necessary, restore the Anchore-Engine database from a backup. +1. **Reactivate Anchore Engine**: Scale the Anchore Engine deployment back to its original state. +1. **Retry Migration**: Re-attempt the migration process following the initial steps. + +This rollback procedure is designed to revert your environment to its pre-migration state, allowing for a fresh migration attempt. 
+ +## Parameters + +### Global Resource Parameters + +| Name | Description | Value | +| ------------------------- | --------------------------------------- | ----- | +| `global.fullnameOverride` | overrides the fullname set on resources | `""` | +| `global.nameOverride` | overrides the name set on resources | `""` | + +### Common Resource Parameters + +| Name | Description | Value | +| --------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------- | +| `image` | Image used for all Anchore Enterprise deployments, excluding Anchore UI | `docker.io/anchore/enterprise:v5.4.0` | +| `imagePullPolicy` | Image pull policy used by all deployments | `IfNotPresent` | +| `imagePullSecretName` | Name of Docker credentials secret for access to private repos | `anchore-enterprise-pullcreds` | +| `startMigrationPod` | Spin up a Database migration pod to help migrate the database to the new schema | `false` | +| `migrationPodImage` | The image reference to the migration pod | `docker.io/postgres:13-bookworm` | +| `migrationAnchoreEngineSecretName` | The name of the secret that has anchore-engine values | `my-engine-anchore-engine` | +| `serviceAccountName` | Name of a service account used to run all Anchore pods | `""` | +| `injectSecretsViaEnv` | Enable secret injection into pod via environment variables instead of via k8s secrets | `false` | +| `licenseSecretName` | Name of the Kubernetes secret containing your license.yaml file | `anchore-enterprise-license` | +| `certStoreSecretName` | Name of secret containing the certificates & keys used for SSL, SAML & CAs | `""` | +| `extraEnv` | Common environment variables set on all containers | `[]` | +| `useExistingSecrets` | forgoes secret creation and uses the secret defined in existingSecretName | `false` | +| `existingSecretName` | Name of an existing secret to be used for Anchore 
core services, excluding Anchore UI | `anchore-enterprise-env` | +| `labels` | Common labels set on all Kubernetes resources | `{}` | +| `annotations` | Common annotations set on all Kubernetes resources | `{}` | +| `nodeSelector` | Common nodeSelector set on all Kubernetes pods | `{}` | +| `tolerations` | Common tolerations set on all Kubernetes pods | `[]` | +| `affinity` | Common affinity set on all Kubernetes pods | `{}` | +| `scratchVolume.mountPath` | The mount path of an external volume for scratch space. Used for the following pods: analyzer, policy-engine, catalog, and reports | `/analysis_scratch` | +| `scratchVolume.fixGroupPermissions` | Enable an initContainer that will fix the fsGroup permissions on all scratch volumes | `false` | +| `scratchVolume.fixerInitContainerImage` | The image to use for the mode-fixer initContainer | `alpine` | +| `scratchVolume.details` | Details for the k8s volume to be created (defaults to default emptyDir) | `{}` | +| `extraVolumes` | mounts additional volumes to each pod | `[]` | +| `extraVolumeMounts` | mounts additional volumes to each pod | `[]` | +| `securityContext.runAsUser` | The securityContext runAsUser for all Anchore pods | `1000` | +| `securityContext.runAsGroup` | The securityContext runAsGroup for all Anchore pods | `1000` | +| `securityContext.fsGroup` | The securityContext fsGroup for all Anchore pods | `1000` | +| `containerSecurityContext` | The securityContext for all containers | `{}` | +| `probes.liveness.initialDelaySeconds` | Initial delay seconds for liveness probe | `120` | +| `probes.liveness.timeoutSeconds` | Timeout seconds for liveness probe | `10` | +| `probes.liveness.periodSeconds` | Period seconds for liveness probe | `10` | +| `probes.liveness.failureThreshold` | Failure threshold for liveness probe | `6` | +| `probes.liveness.successThreshold` | Success threshold for liveness probe | `1` | +| `probes.readiness.timeoutSeconds` | Timeout seconds for the readiness probe | `10` | +| 
`probes.readiness.periodSeconds` | Period seconds for the readiness probe | `10` | +| `probes.readiness.failureThreshold` | Failure threshold for the readiness probe | `3` | +| `probes.readiness.successThreshold` | Success threshold for the readiness probe | `1` | +| `doSourceAtEntry.enabled` | Does a `source` of the file path defined before starting Anchore services | `false` | +| `doSourceAtEntry.filePaths` | List of file paths to `source` before starting Anchore services | `[]` | +| `configOverride` | Allows for overriding the default Anchore configuration file | `""` | +| `scripts` | Collection of helper scripts usable in all anchore enterprise pods | `{}` | + +### Anchore Configuration Parameters + +| Name | Description | Value | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | ------------------ | +| `anchoreConfig.service_dir` | Path to directory where default Anchore config files are placed at startup | `/anchore_service` | +| `anchoreConfig.log_level` | The log level for Anchore services | `INFO` | +| `anchoreConfig.allow_awsecr_iam_auto` | Enable AWS IAM instance role for ECR auth | `true` | +| `anchoreConfig.keys.secret` | The shared secret used for signing & encryption, auto-generated by Helm if not set. 
| `""` | +| `anchoreConfig.keys.privateKeyFileName` | The file name of the private key used for signing & encryption, found in the k8s secret specified in .Values.certStoreSecretName | `""` | +| `anchoreConfig.keys.publicKeyFileName` | The file name of the public key used for signing & encryption, found in the k8s secret specified in .Values.certStoreSecretName | `""` | +| `anchoreConfig.user_authentication.oauth.enabled` | Enable OAuth for Anchore user authentication | `true` | +| `anchoreConfig.user_authentication.oauth.default_token_expiration_seconds` | The expiration, in seconds, for OAuth tokens | `3600` | +| `anchoreConfig.user_authentication.oauth.refresh_token_expiration_seconds` | The expiration, in seconds, for OAuth refresh tokens | `86400` | +| `anchoreConfig.user_authentication.allow_api_keys_for_saml_users` | Enable API key generation and authentication for SAML users | `false` | +| `anchoreConfig.user_authentication.max_api_key_age_days` | The maximum age, in days, for API keys | `365` | +| `anchoreConfig.user_authentication.max_api_keys_per_user` | The maximum number of API keys per user | `100` | +| `anchoreConfig.user_authentication.remove_deleted_user_api_keys_older_than_days` | The number of days elapsed after a user API key is deleted before it is garbage collected (-1 to disable) | `365` | +| `anchoreConfig.user_authentication.hashed_passwords` | Enable storing passwords as secure hashes in the database | `true` | +| `anchoreConfig.user_authentication.sso_require_existing_users` | set to true in order to disable the SSO JIT provisioning during authentication | `false` | +| `anchoreConfig.metrics.enabled` | Enable Prometheus metrics for all Anchore services | `false` | +| `anchoreConfig.metrics.auth_disabled` | Disable auth on Prometheus metrics for all Anchore services | `false` | +| `anchoreConfig.webhooks` | Enable Anchore services to provide webhooks for external system updates | `{}` | +| `anchoreConfig.default_admin_password` | The 
password for the Anchore Enterprise admin user | `""` | +| `anchoreConfig.default_admin_email` | The email address used for the Anchore Enterprise admin user | `admin@myanchore` | +| `anchoreConfig.database.timeout` | | `120` | +| `anchoreConfig.database.ssl` | Enable SSL/TLS for the database connection | `false` | +| `anchoreConfig.database.sslMode` | The SSL mode to use for database connection | `verify-full` | +| `anchoreConfig.database.sslRootCertFileName` | File name of the database root CA certificate stored in the k8s secret specified with .Values.certStoreSecretName | `""` | +| `anchoreConfig.database.db_pool_size` | The database max connection pool size | `30` | +| `anchoreConfig.database.db_pool_max_overflow` | The maximum overflow size of the database connection pool | `100` | +| `anchoreConfig.database.engineArgs` | Set custom database engine arguments for SQLAlchemy | `{}` | +| `anchoreConfig.internalServicesSSL.enabled` | Force all Enterprise services to use SSL for internal communication | `false` | +| `anchoreConfig.internalServicesSSL.verifyCerts` | Enable cert verification against the local cert bundle, if this set to false self-signed certs are allowed | `false` | +| `anchoreConfig.internalServicesSSL.certSecretKeyFileName` | File name of the private key used for internal SSL stored in the secret specified in .Values.certStoreSecretName | `""` | +| `anchoreConfig.internalServicesSSL.certSecretCertFileName` | File name of the root CA certificate used for internal SSL stored in the secret specified in .Values.certStoreSecretName | `""` | +| `anchoreConfig.policyBundles` | Include custom Anchore policy bundles | `{}` | +| `anchoreConfig.apiext.external.enabled` | Allow overrides for constructing Anchore API URLs | `false` | +| `anchoreConfig.apiext.external.useTLS` | Enable TLS for external API access | `true` | +| `anchoreConfig.apiext.external.hostname` | Hostname for the external Anchore API | `""` | +| `anchoreConfig.apiext.external.port` | Port 
configured for external Anchore API | `8443` | +| `anchoreConfig.analyzer.cycle_timers.image_analyzer` | The interval between checks of the work queue for new analysis jobs | `1` | +| `anchoreConfig.analyzer.layer_cache_max_gigabytes` | Specify a cache size > 0GB to enable image layer caching | `0` | +| `anchoreConfig.analyzer.enable_hints` | Enable a user-supplied 'hints' file to override and/or augment the software artifacts found during analysis | `false` | +| `anchoreConfig.analyzer.configFile` | Custom Anchore Analyzer configuration file contents in YAML | `{}` | +| `anchoreConfig.catalog.cycle_timers.image_watcher` | Interval (seconds) to check for an update to a tag | `3600` | +| `anchoreConfig.catalog.cycle_timers.policy_eval` | Interval (seconds) to run a policy evaluation on images with policy_eval subscription activated | `3600` | +| `anchoreConfig.catalog.cycle_timers.vulnerability_scan` | Interval to run a vulnerability scan on images with vuln_update subscription activated | `14400` | +| `anchoreConfig.catalog.cycle_timers.analyzer_queue` | Interval to add new work on the image analysis queue | `1` | +| `anchoreConfig.catalog.cycle_timers.archive_tasks` | Interval to trigger Anchore Catalog archive Tasks | `43200` | +| `anchoreConfig.catalog.cycle_timers.notifications` | Interval in which notifications will be processed for state changes | `30` | +| `anchoreConfig.catalog.cycle_timers.service_watcher` | Interval of service state update poll, used for system status | `15` | +| `anchoreConfig.catalog.cycle_timers.policy_bundle_sync` | Interval of policy bundle sync | `300` | +| `anchoreConfig.catalog.cycle_timers.repo_watcher` | Interval between checks to repo for new tags | `60` | +| `anchoreConfig.catalog.cycle_timers.image_gc` | Interval for garbage collection of images marked for deletion | `60` | +| `anchoreConfig.catalog.cycle_timers.k8s_image_watcher` | Interval for the runtime inventory image analysis poll | `150` | +| 
`anchoreConfig.catalog.cycle_timers.resource_metrics` | Interval (seconds) for computing metrics from the DB | `60` | +| `anchoreConfig.catalog.cycle_timers.events_gc` | Interval (seconds) for cleaning up events in the system based on timestamp | `43200` | +| `anchoreConfig.catalog.cycle_timers.artifact_lifecycle_policy_tasks` | Interval (seconds) for running artifact lifecycle policy tasks | `43200` | +| `anchoreConfig.catalog.event_log` | Event log for webhooks, YAML configuration | `{}` | +| `anchoreConfig.catalog.analysis_archive` | Custom analysis archive YAML configuration | `{}` | +| `anchoreConfig.catalog.object_store` | Custom object storage YAML configuration | `{}` | +| `anchoreConfig.catalog.runtime_inventory.inventory_ttl_days` | TTL for runtime inventory. | `120` | +| `anchoreConfig.catalog.runtime_inventory.inventory_ingest_overwrite` | force runtime inventory to be overwritten upon every update for that reported context. | `false` | +| `anchoreConfig.catalog.down_analyzer_task_requeue` | Allows fast re-queueing when image status is 'analyzing' on an analyzer that is no longer in the 'up' state | `true` | +| `anchoreConfig.policy_engine.cycle_timers.feed_sync` | Interval to run a feed sync to get latest cve data | `14400` | +| `anchoreConfig.policy_engine.cycle_timers.feed_sync_checker` | Interval between checks to see if there needs to be a task queued | `3600` | +| `anchoreConfig.policy_engine.overrideFeedsToUpstream` | Override the Anchore Feeds URL to use the public upstream Anchore Feeds | `false` | +| `anchoreConfig.notifications.cycle_timers.notifications` | Interval that notifications are sent | `30` | +| `anchoreConfig.notifications.ui_url` | Set the UI URL that is included in the notification, defaults to the Enterprise UI service name | `""` | +| `anchoreConfig.reports.enable_graphiql` | Enable GraphiQL, a GUI for editing and testing GraphQL queries and mutations | `true` | +| `anchoreConfig.reports.async_execution_timeout` | Configure how 
long a scheduled query must be running for before it is considered timed out | `48h` | +| `anchoreConfig.reports.cycle_timers.reports_scheduled_queries` | Interval in seconds to check for scheduled queries that need to be run | `600` | +| `anchoreConfig.reports.use_volume` | Configure the reports service to buffer report generation to disk instead of in memory | `false` | +| `anchoreConfig.reports_worker.enable_data_ingress` | Enable periodically syncing data into the Anchore Reports Service | `true` | +| `anchoreConfig.reports_worker.enable_data_egress` | Periodically remove reporting data that has been removed in other parts of system | `false` | +| `anchoreConfig.reports_worker.data_egress_window` | defines a number of days to keep reporting data following its deletion in the rest of system. | `0` | +| `anchoreConfig.reports_worker.data_refresh_max_workers` | The maximum number of concurrent threads to refresh existing results (etl vulnerabilities and evaluations) in reports service. | `10` | +| `anchoreConfig.reports_worker.data_load_max_workers` | The maximum number of concurrent threads to load new results (etl vulnerabilities and evaluations) to reports service. 
| `10` | +| `anchoreConfig.reports_worker.cycle_timers.reports_image_load` | Interval that vulnerabilities for images are synced | `600` | +| `anchoreConfig.reports_worker.cycle_timers.reports_tag_load` | Interval that vulnerabilities by tags are synced | `600` | +| `anchoreConfig.reports_worker.cycle_timers.reports_runtime_inventory_load` | Interval that the runtime inventory is synced | `600` | +| `anchoreConfig.reports_worker.cycle_timers.reports_extended_runtime_vuln_load` | Interval that extended runtime reports are synced (ecs, k8s containers and namespaces) | `1800` | +| `anchoreConfig.reports_worker.cycle_timers.reports_image_refresh` | Interval that images are refreshed | `7200` | +| `anchoreConfig.reports_worker.cycle_timers.reports_tag_refresh` | Interval that tags are refreshed | `7200` | +| `anchoreConfig.reports_worker.cycle_timers.reports_metrics` | Interval for how often reporting metrics are generated | `3600` | +| `anchoreConfig.reports_worker.cycle_timers.reports_image_egress` | Interval stale states are removed by image | `600` | +| `anchoreConfig.reports_worker.cycle_timers.reports_tag_egress` | Interval stale states are removed by tag | `600` | +| `anchoreConfig.ui.enable_proxy` | Trust a reverse proxy when setting secure cookies (via the `X-Forwarded-Proto` header) | `false` | +| `anchoreConfig.ui.enable_ssl` | Enable SSL in the Anchore UI container | `false` | +| `anchoreConfig.ui.enable_shared_login` | Allow single user to start multiple Anchore UI sessions | `true` | +| `anchoreConfig.ui.redis_flushdb` | Flush user session keys and empty data on Anchore UI startup | `true` | +| `anchoreConfig.ui.force_websocket` | Force WebSocket protocol for socket message communications | `false` | +| `anchoreConfig.ui.authentication_lock.count` | Number of failed authentication attempts allowed before a temporary lock is applied | `5` | +| `anchoreConfig.ui.authentication_lock.expires` | Authentication lock duration | `300` | +| 
`anchoreConfig.ui.custom_links` | List of up to 10 external links provided | `{}` | +| `anchoreConfig.ui.enable_add_repositories` | Specify what users can add image repositories to the Anchore UI | `{}` | +| `anchoreConfig.ui.log_level` | Descriptive detail of the application log output | `http` | +| `anchoreConfig.ui.enrich_inventory_view` | aggregate and include compliance and vulnerability data from the reports service. | `true` | +| `anchoreConfig.ui.appdb_config.native` | toggle the postgreSQL drivers used to connect to the database between the native and the NodeJS drivers. | `true` | +| `anchoreConfig.ui.appdb_config.pool.max` | maximum number of simultaneous connections allowed in the connection pool | `10` | +| `anchoreConfig.ui.appdb_config.pool.min` | minimum number of connections | `0` | +| `anchoreConfig.ui.appdb_config.pool.acquire` | the timeout in milliseconds used when acquiring a new connection | `30000` | +| `anchoreConfig.ui.appdb_config.pool.idle` | the maximum time that a connection can be idle before being released | `10000` | +| `anchoreConfig.ui.dbUser` | allows overriding and separation of the ui database user. 
| `""` | +| `anchoreConfig.ui.dbPassword` | allows overriding and separation of the ui database user authentication | `""` | + +### Anchore Analyzer k8s Deployment Parameters + +| Name | Description | Value | +| -------------------------------- | --------------------------------------------------------------------------- | ------ | +| `analyzer.replicaCount` | Number of replicas for the Anchore Analyzer deployment | `1` | +| `analyzer.service.port` | The port used for gathering metrics when .Values.metricsEnabled=true | `8084` | +| `analyzer.extraEnv` | Set extra environment variables for Anchore Analyzer pods | `[]` | +| `analyzer.resources` | Resource requests and limits for Anchore Analyzer pods | `{}` | +| `analyzer.labels` | Labels for Anchore Analyzer pods | `{}` | +| `analyzer.annotations` | Annotation for Anchore Analyzer pods | `{}` | +| `analyzer.nodeSelector` | Node labels for Anchore Analyzer pod assignment | `{}` | +| `analyzer.tolerations` | Tolerations for Anchore Analyzer pod assignment | `[]` | +| `analyzer.affinity` | Affinity for Anchore Analyzer pod assignment | `{}` | +| `analyzer.serviceAccountName` | Service account name for Anchore Analyzer pods | `""` | +| `analyzer.scratchVolume.details` | Details for the k8s volume to be created for Anchore Analyzer scratch space | `{}` | + +### Anchore API k8s Deployment Parameters + +| Name | Description | Value | +| ------------------------- | ---------------------------------------------------- | ----------- | +| `api.replicaCount` | Number of replicas for Anchore API deployment | `1` | +| `api.service.type` | Service type for Anchore API | `ClusterIP` | +| `api.service.port` | Service port for Anchore API | `8228` | +| `api.service.annotations` | Annotations for Anchore API service | `{}` | +| `api.service.labels` | Labels for Anchore API service | `{}` | +| `api.service.nodePort` | nodePort for Anchore API service | `""` | +| `api.extraEnv` | Set extra environment variables for Anchore API pods | `[]` | 
+| `api.extraVolumes` | Define additional volumes for Anchore API pods | `[]` | +| `api.extraVolumeMounts` | Define additional volume mounts for Anchore API pods | `[]` | +| `api.resources` | Resource requests and limits for Anchore API pods | `{}` | +| `api.labels` | Labels for Anchore API pods | `{}` | +| `api.annotations` | Annotation for Anchore API pods | `{}` | +| `api.nodeSelector` | Node labels for Anchore API pod assignment | `{}` | +| `api.tolerations` | Tolerations for Anchore API pod assignment | `[]` | +| `api.affinity` | Affinity for Anchore API pod assignment | `{}` | +| `api.serviceAccountName` | Service account name for Anchore API pods | `""` | + +### Anchore Catalog k8s Deployment Parameters + +| Name | Description | Value | +| ------------------------------- | -------------------------------------------------------------------------- | ----------- | +| `catalog.replicaCount` | Number of replicas for the Anchore Catalog deployment | `1` | +| `catalog.service.type` | Service type for Anchore Catalog | `ClusterIP` | +| `catalog.service.port` | Service port for Anchore Catalog | `8082` | +| `catalog.service.annotations` | Annotations for Anchore Catalog service | `{}` | +| `catalog.service.labels` | Labels for Anchore Catalog service | `{}` | +| `catalog.service.nodePort` | nodePort for Anchore Catalog service | `""` | +| `catalog.extraEnv` | Set extra environment variables for Anchore Catalog pods | `[]` | +| `catalog.extraVolumes` | Define additional volumes for Anchore Catalog pods | `[]` | +| `catalog.extraVolumeMounts` | Define additional volume mounts for Anchore Catalog pods | `[]` | +| `catalog.resources` | Resource requests and limits for Anchore Catalog pods | `{}` | +| `catalog.labels` | Labels for Anchore Catalog pods | `{}` | +| `catalog.annotations` | Annotation for Anchore Catalog pods | `{}` | +| `catalog.nodeSelector` | Node labels for Anchore Catalog pod assignment | `{}` | +| `catalog.tolerations` | Tolerations for Anchore Catalog 
pod assignment | `[]` | +| `catalog.affinity` | Affinity for Anchore Catalog pod assignment | `{}` | +| `catalog.serviceAccountName` | Service account name for Anchore Catalog pods | `""` | +| `catalog.scratchVolume.details` | Details for the k8s volume to be created for Anchore Catalog scratch space | `{}` | + +### Anchore Feeds Chart Parameters + +| Name | Description | Value | +| -------------------- | ---------------------------------------------------------------------------------------------- | ------- | +| `feeds.chartEnabled` | Enable the Anchore Feeds chart | `true` | +| `feeds.standalone` | Sets the Anchore Feeds chart to run in non-standalone mode, for use with Anchore Enterprise. | `false` | +| `feeds.url` | Set the URL for a standalone Feeds service. Use when chartEnabled=false. | `""` | +| `feeds.resources` | Resource requests and limits for Anchore Feeds pods | `{}` | + +### Anchore Notifications Parameters + +| Name | Description | Value | +| ----------------------------------- | -------------------------------------------------------------- | ----------- | +| `notifications.replicaCount` | Number of replicas for the Anchore Notifications deployment | `1` | +| `notifications.service.type` | Service type for Anchore Notifications | `ClusterIP` | +| `notifications.service.port` | Service port for Anchore Notifications | `8668` | +| `notifications.service.annotations` | Annotations for Anchore Notifications service | `{}` | +| `notifications.service.labels` | Labels for Anchore Notifications service | `{}` | +| `notifications.service.nodePort` | nodePort for Anchore Notifications service | `""` | +| `notifications.extraEnv` | Set extra environment variables for Anchore Notifications pods | `[]` | +| `notifications.extraVolumes` | Define additional volumes for Anchore Notifications pods | `[]` | +| `notifications.extraVolumeMounts` | Define additional volume mounts for Anchore Notifications pods | `[]` | +| `notifications.resources` | Resource 
requests and limits for Anchore Notifications pods | `{}` | +| `notifications.labels` | Labels for Anchore Notifications pods | `{}` | +| `notifications.annotations` | Annotation for Anchore Notifications pods | `{}` | +| `notifications.nodeSelector` | Node labels for Anchore Notifications pod assignment | `{}` | +| `notifications.tolerations` | Tolerations for Anchore Notifications pod assignment | `[]` | +| `notifications.affinity` | Affinity for Anchore Notifications pod assignment | `{}` | +| `notifications.serviceAccountName` | Service account name for Anchore Notifications pods | `""` | + +### Anchore Policy Engine k8s Deployment Parameters + +| Name | Description | Value | +| ------------------------------------ | -------------------------------------------------------------------------------- | ----------- | +| `policyEngine.replicaCount` | Number of replicas for the Anchore Policy Engine deployment | `1` | +| `policyEngine.service.type` | Service type for Anchore Policy Engine | `ClusterIP` | +| `policyEngine.service.port` | Service port for Anchore Policy Engine | `8087` | +| `policyEngine.service.annotations` | Annotations for Anchore Policy Engine service | `{}` | +| `policyEngine.service.labels` | Labels for Anchore Policy Engine service | `{}` | +| `policyEngine.service.nodePort` | nodePort for Anchore Policy Engine service | `""` | +| `policyEngine.extraEnv` | Set extra environment variables for Anchore Policy Engine pods | `[]` | +| `policyEngine.extraVolumes` | Define additional volumes for Anchore Policy Engine pods | `[]` | +| `policyEngine.extraVolumeMounts` | Define additional volume mounts for Anchore Policy Engine pods | `[]` | +| `policyEngine.resources` | Resource requests and limits for Anchore Policy Engine pods | `{}` | +| `policyEngine.labels` | Labels for Anchore Policy Engine pods | `{}` | +| `policyEngine.annotations` | Annotation for Anchore Policy Engine pods | `{}` | +| `policyEngine.nodeSelector` | Node labels for Anchore Policy 
Engine pod assignment | `{}` | +| `policyEngine.tolerations` | Tolerations for Anchore Policy Engine pod assignment | `[]` | +| `policyEngine.affinity` | Affinity for Anchore Policy Engine pod assignment | `{}` | +| `policyEngine.serviceAccountName` | Service account name for Anchore Policy Engine pods | `""` | +| `policyEngine.scratchVolume.details` | Details for the k8s volume to be created for Anchore Policy Engine scratch space | `{}` | + +### Anchore Reports Parameters + +| Name | Description | Value | +| ------------------------------- | -------------------------------------------------------------------------- | ----------- | +| `reports.replicaCount` | Number of replicas for the Anchore Reports deployment | `1` | +| `reports.service.type` | Service type for Anchore Reports | `ClusterIP` | +| `reports.service.port` | Service port for Anchore Reports | `8558` | +| `reports.service.annotations` | Annotations for Anchore Reports service | `{}` | +| `reports.service.labels` | Labels for Anchore Reports service | `{}` | +| `reports.service.nodePort` | nodePort for Anchore Reports service | `""` | +| `reports.extraEnv` | Set extra environment variables for Anchore Reports pods | `[]` | +| `reports.extraVolumes` | Define additional volumes for Anchore Reports pods | `[]` | +| `reports.extraVolumeMounts` | Define additional volume mounts for Anchore Reports pods | `[]` | +| `reports.resources` | Resource requests and limits for Anchore Reports pods | `{}` | +| `reports.labels` | Labels for Anchore Reports pods | `{}` | +| `reports.annotations` | Annotation for Anchore Reports pods | `{}` | +| `reports.nodeSelector` | Node labels for Anchore Reports pod assignment | `{}` | +| `reports.tolerations` | Tolerations for Anchore Reports pod assignment | `[]` | +| `reports.affinity` | Affinity for Anchore Reports pod assignment | `{}` | +| `reports.serviceAccountName` | Service account name for Anchore Reports pods | `""` | +| `reports.scratchVolume.details` | Details for 
 the k8s volume to be created for Anchore Reports scratch space | `{}` | + +### Anchore Reports Worker Parameters + +| Name | Description | Value | +| ----------------------------------- | --------------------------------------------------------------- | ----------- | +| `reportsWorker.replicaCount` | Number of replicas for the Anchore Reports Worker deployment | `1` | +| `reportsWorker.service.type` | Service type for Anchore Reports Worker | `ClusterIP` | +| `reportsWorker.service.port` | Service port for Anchore Reports Worker | `8559` | +| `reportsWorker.service.annotations` | Annotations for Anchore Reports Worker service | `{}` | +| `reportsWorker.service.labels` | Labels for Anchore Reports Worker service | `{}` | +| `reportsWorker.service.nodePort` | nodePort for Anchore Reports Worker service | `""` | +| `reportsWorker.extraEnv` | Set extra environment variables for Anchore Reports Worker pods | `[]` | +| `reportsWorker.extraVolumes` | Define additional volumes for Anchore Reports Worker pods | `[]` | +| `reportsWorker.extraVolumeMounts` | Define additional volume mounts for Anchore Reports Worker pods | `[]` | +| `reportsWorker.resources` | Resource requests and limits for Anchore Reports Worker pods | `{}` | +| `reportsWorker.labels` | Labels for Anchore Reports Worker pods | `{}` | +| `reportsWorker.annotations` | Annotation for Anchore Reports Worker pods | `{}` | +| `reportsWorker.nodeSelector` | Node labels for Anchore Reports Worker pod assignment | `{}` | +| `reportsWorker.tolerations` | Tolerations for Anchore Reports Worker pod assignment | `[]` | +| `reportsWorker.affinity` | Affinity for Anchore Reports Worker pod assignment | `{}` | +| `reportsWorker.serviceAccountName` | Service account name for Anchore Reports Worker pods | `""` | + +### Anchore Simple Queue Parameters + +| Name | Description | Value | +| --------------------------------- | ------------------------------------------------------------- | ----------- | +| `simpleQueue.replicaCount` | 
Number of replicas for the Anchore Simple Queue deployment | `1` | +| `simpleQueue.service.type` | Service type for Anchore Simple Queue | `ClusterIP` | +| `simpleQueue.service.port` | Service port for Anchore Simple Queue | `8083` | +| `simpleQueue.service.annotations` | Annotations for Anchore Simple Queue service | `{}` | +| `simpleQueue.service.labels` | Labels for Anchore Simple Queue service | `{}` | +| `simpleQueue.service.nodePort` | nodePort for Anchore Simple Queue service | `""` | +| `simpleQueue.extraEnv` | Set extra environment variables for Anchore Simple Queue pods | `[]` | +| `simpleQueue.extraVolumes` | Define additional volumes for Anchore Simple Queue pods | `[]` | +| `simpleQueue.extraVolumeMounts` | Define additional volume mounts for Anchore Simple Queue pods | `[]` | +| `simpleQueue.resources` | Resource requests and limits for Anchore Simple Queue pods | `{}` | +| `simpleQueue.labels` | Labels for Anchore Simple Queue pods | `{}` | +| `simpleQueue.annotations` | Annotation for Anchore Simple Queue pods | `{}` | +| `simpleQueue.nodeSelector` | Node labels for Anchore Simple Queue pod assignment | `{}` | +| `simpleQueue.tolerations` | Tolerations for Anchore Simple Queue pod assignment | `[]` | +| `simpleQueue.affinity` | Affinity for Anchore Simple Queue pod assignment | `{}` | +| `simpleQueue.serviceAccountName` | Service account name for Anchore Simple Queue pods | `""` | + +### Anchore UI Parameters + +| Name | Description | Value | +| ---------------------------- | ----------------------------------------------------------------------------- | ---------------------------------------- | +| `ui.image` | Image used for the Anchore UI container | `docker.io/anchore/enterprise-ui:v5.4.0` | +| `ui.imagePullPolicy` | Image pull policy for Anchore UI image | `IfNotPresent` | +| `ui.existingSecretName` | Name of an existing secret to be used for Anchore UI DB and Redis endpoints | `anchore-enterprise-ui-env` | +| `ui.ldapsRootCaCertName` | Name of 
 the custom CA certificate file stored in `.Values.certStoreSecretName` | `""` | +| `ui.service.type` | Service type for Anchore UI | `ClusterIP` | +| `ui.service.port` | Service port for Anchore UI | `80` | +| `ui.service.annotations` | Annotations for Anchore UI service | `{}` | +| `ui.service.labels` | Labels for Anchore UI service | `{}` | +| `ui.service.sessionAffinity` | Session Affinity for UI service | `ClientIP` | +| `ui.service.nodePort` | nodePort for Anchore UI service | `""` | +| `ui.extraEnv` | Set extra environment variables for Anchore UI pods | `[]` | +| `ui.extraVolumes` | Define additional volumes for Anchore UI pods | `[]` | +| `ui.extraVolumeMounts` | Define additional volume mounts for Anchore UI pods | `[]` | +| `ui.resources` | Resource requests and limits for Anchore UI pods | `{}` | +| `ui.labels` | Labels for Anchore UI pods | `{}` | +| `ui.annotations` | Annotation for Anchore UI pods | `{}` | +| `ui.nodeSelector` | Node labels for Anchore UI pod assignment | `{}` | +| `ui.tolerations` | Tolerations for Anchore UI pod assignment | `[]` | +| `ui.affinity` | Affinity for Anchore UI pod assignment | `{}` | +| `ui.serviceAccountName` | Service account name for Anchore UI pods | `""` | + +### Anchore Upgrade Job Parameters + +| Name | Description | Value | +| ------------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------- | +| `upgradeJob.enabled` | Enable the Anchore Enterprise database upgrade job | `true` | +| `upgradeJob.force` | Force the Anchore Enterprise database upgrade job to run as a regular job instead of as a Helm hook | `false` | +| `upgradeJob.rbacCreate` | Create RBAC resources for the Anchore upgrade job | `true` | +| `upgradeJob.serviceAccountName` | Use an existing service account for the Anchore upgrade job | `""` | +| `upgradeJob.usePostUpgradeHook` | Use a Helm post-upgrade hook to run 
 the upgrade job instead of the default pre-upgrade hook. This job does not require creating RBAC resources. | `false` | +| `upgradeJob.kubectlImage` | The image to use for the upgrade job's init container that uses kubectl to scale down deployments before an upgrade | `bitnami/kubectl:1.27` | +| `upgradeJob.nodeSelector` | Node labels for the Anchore upgrade job pod assignment | `{}` | +| `upgradeJob.tolerations` | Tolerations for the Anchore upgrade job pod assignment | `[]` | +| `upgradeJob.affinity` | Affinity for the Anchore upgrade job pod assignment | `{}` | +| `upgradeJob.annotations` | Annotations for the Anchore upgrade job | `{}` | +| `upgradeJob.resources` | Resource requests and limits for the Anchore upgrade job | `{}` | +| `upgradeJob.labels` | Labels for the Anchore upgrade job | `{}` | +| `upgradeJob.ttlSecondsAfterFinished` | The time period in seconds the upgrade job, and its related pods should be retained for | `-1` | + +### Ingress Parameters + +| Name | Description | Value | +| -------------------------- | ------------------------------------------------------------------ | ---------------------- | +| `ingress.enabled` | Create an ingress resource for external Anchore service APIs | `false` | +| `ingress.labels` | Labels for the ingress resource | `{}` | +| `ingress.annotations` | Annotations for the ingress resource | `{}` | +| `ingress.apiHosts` | List of custom hostnames for the Anchore API | `[]` | +| `ingress.apiPaths` | The path used for accessing the Anchore API | `["/v2/","/version/"]` | +| `ingress.uiHosts` | List of custom hostnames for the Anchore UI | `[]` | +| `ingress.uiPath` | The path used for accessing the Anchore UI | `/` | +| `ingress.feedsHosts` | List of custom hostnames for the Anchore Feeds API | `[]` | +| `ingress.feedsPaths` | The path used for accessing the Anchore Feeds API | `["/v2/feeds/"]` | +| `ingress.tls` | Configure TLS for the ingress resource | `[]` | +| `ingress.ingressClassName` | sets the ingress class 
name. As of k8s v1.18, this should be nginx | `nginx` | + +### Google CloudSQL DB Parameters + +| Name | Description | Value | +| -------------------------------- | ------------------------------------------------------------------------------ | ----------------------------------------- | +| `cloudsql.enabled` | Use CloudSQL proxy container for GCP database access | `false` | +| `cloudsql.image` | Image to use for GCE CloudSQL Proxy | `gcr.io/cloudsql-docker/gce-proxy:1.25.0` | +| `cloudsql.imagePullPolicy` | Image Pull Policy to use for CloudSQL image | `IfNotPresent` | +| `cloudsql.instance` | CloudSQL instance, eg: 'project:zone:instancename' | `""` | +| `cloudsql.useExistingServiceAcc` | Use existing service account | `false` | +| `cloudsql.serviceAccSecretName` | | `""` | +| `cloudsql.serviceAccJsonName` | | `""` | +| `cloudsql.extraArgs` | a list of extra arguments to be passed into the cloudsql container command. eg | `[]` | + +### Anchore UI Redis Parameters + +| Name | Description | Value | +| ------------------------------------- | ------------------------------------------------------------------------------------------------ | ------------------- | +| `ui-redis.chartEnabled` | Use the dependent chart for the UI Redis deployment | `true` | +| `ui-redis.externalEndpoint` | External Redis endpoint when not using Helm managed chart (eg redis://:@hostname:6379) | `""` | +| `ui-redis.auth.password` | Password used for connecting to Redis | `anchore-redis,123` | +| `ui-redis.architecture` | Redis deployment architecture | `standalone` | +| `ui-redis.master.persistence.enabled` | enables persistence | `false` | + +### Anchore Database Parameters + +| Name | Description | Value | +| --------------------------------------------- | ------------------------------------------------------------------------------------------- | ----------------------- | +| `postgresql.chartEnabled` | Use the dependent chart for Postgresql deployment | `true` | +| 
`postgresql.externalEndpoint` | External Postgresql hostname when not using Helm managed chart (eg. mypostgres.myserver.io) | `""` | +| `postgresql.auth.username` | Username used to connect to postgresql | `anchore` | +| `postgresql.auth.password` | Password used to connect to postgresql | `anchore-postgres,123` | +| `postgresql.auth.database` | Database name used when connecting to postgresql | `anchore` | +| `postgresql.primary.service.ports.postgresql` | Port used to connect to Postgresql | `5432` | +| `postgresql.primary.persistence.size` | Configure size of the persistent volume used with helm managed chart | `20Gi` | +| `postgresql.primary.extraEnvVars` | An array to add extra environment variables | `[]` | +| `postgresql.image.tag` | Specifies the image to use for this chart. | `13.11.0-debian-11-r15` | + + +## Release Notes + +For the latest updates and features in Anchore Enterprise, see the official [Release Notes](https://docs.anchore.com/current/docs/releasenotes/). + +- **Major Chart Version Change (e.g., v0.1.2 -> v1.0.0)**: Signifies an incompatible breaking change that necessitates manual intervention, such as updates to your values file or data migrations. +- **Minor Chart Version Change (e.g., v0.1.2 -> v0.2.0)**: Indicates a significant change to the deployment that does not require manual intervention. +- **Patch Chart Version Change (e.g., v0.1.2 -> v0.1.3)**: Indicates a backwards-compatible bug fix or documentation update. + +### V2.5.x + +- Deploys Anchore Enterprise v5.4.x. See the [Release Notes](https://docs.anchore.com/current/docs/releasenotes/540/) for more information. +- Anchore Enterprise v5.4.0 introduces changes to how RBAC is managed. The chart has been updated to reflect these changes, no action is required. + - The rbac-manager and rbac-authorizer components are no longer necessary and have been removed from the chart. + - The `rbacManager` and `rbacAuthorizer` sections of the values file have been removed. 
+ +### V2.4.x + +- Deploys Anchore Enterprise v5.3.x. See the [Release Notes](https://docs.anchore.com/current/docs/releasenotes/530/) for more information. +- Bump kubeVersion requirement to allow deployment on Kubernetes v1.29.x clusters. + +### V2.3.0 + +- Deploys Anchore Enterprise v5.2.0. See the [Release Notes](https://docs.anchore.com/current/docs/releasenotes/520/) for more information. +- The reports pod has been split out of the API deployment and is now a separate deployment. A new deployment called `reports_worker` has been added. This allows for more granular control over the resources allocated to the reports and reports_worker services. + - :warning: **WARNING:** Values file changes necessary: + - If you are using a custom port for the reports service, previously set with `api.service.reportsPort`, you will need to update your values file to use `reports.service.port` instead. + - Component specific configurations such as resources (as well as annotations, labels, extraEnv, etc) were previously set for both reports pods found in the `reports_deployment` and `api_deployment` using the `reports.resources` section of the values file. These have been split into separate deployments and the resources are now set in the `reports.resources` and `reports_worker.resources` sections of the values file. If you are using custom resources, you will need to update your values file to reflect this change. +- The reports service is now an internal service and the GraphQLAPI/ReportsAPI is served to users by the API service and routed internally in the deployment as needed. This version of the chart removed deprecated ingress configurations to accommodate this change. Update your values file to remove all references to the `reports` service in the `ingress` section. + +### V2.2.0 + +- The following keys were changed: + 1. anchoreConfig.user_authentication.oauth.allow_api_keys_for_saml_users -> anchoreConfig.user_authentication.allow_api_keys_for_saml_users + 2. 
anchoreConfig.user_authentication.oauth.max_api_key_age_days -> anchoreConfig.user_authentication.max_api_key_age_days + 3. anchoreConfig.user_authentication.oauth.max_api_keys_per_user -> anchoreConfig.user_authentication.max_api_keys_per_user + +### V2.1.0 + +- Deploys Anchore Enterprise v5.1.0. See the [Release Notes](https://docs.anchore.com/current/docs/releasenotes/510/) for more information. +- The Redis client utilized by the UI has been updated and no longer requires a username to be specified in the URI. The chart configuration has been updated to reflect this change. If you are using secrets generated by the chart, no action is required. + + - :warning: **WARNING:** If you are using existing secrets, you will need to update your ANCHORE_REDIS_URI environment variable to remove the `nouser` username. The UI will not function without this change. For example: + + ```yaml + ANCHORE_REDIS_URI: redis://:anchore-redis,123@anchore-ui-redis:6379 + ``` + +### v2.0.0 + +- Deploys Anchore Enterprise v5.0.0 +- Anchore Enterprise v5.0.0 introduces a breaking change to the API endpoints, and requires updating any external integrations to use the new endpoints. See the [Migration Guide](https://docs.anchore.com/current/docs/deployment/upgrade/5.0/) for more information. +- The following values were removed as only the `v2` API is supported in Anchore Enterprise 5.0.0: + - `api.service.apiVersion` + - `notifications.service.apiVersion` + - `reports.service.apiVersion` + - `rbacManager.service.apiVersion` + - `feeds.service.apiVersion` + +### v1.0.0 + +- This is a stable release of the Anchore Enterprise Helm chart and is recommended for production deployments. +- Deploys Anchore Enterprise v4.9.3. +- This version of the chart is required for the migration from the anchore-engine chart, and is a pre-requisite for Anchore Enterprise 5.0. + +### v0.x.x + +- This is a pre-release version of the Anchore Enterprise Helm chart and is not recommended for production deployments. 
diff --git a/stable/enterprise/files/default_config.yaml b/stable/enterprise/files/default_config.yaml
new file mode 100644
index 00000000..b6744018
--- /dev/null
+++ b/stable/enterprise/files/default_config.yaml
@@ -0,0 +1,252 @@
+service_dir: ${ANCHORE_SERVICE_DIR}
+tmp_dir: ${ANCHORE_TMP_DIR}
+log_level: ${ANCHORE_LOG_LEVEL}
+
+allow_awsecr_iam_auto: ${ANCHORE_ALLOW_ECR_IAM_AUTO}
+host_id: "${ANCHORE_HOST_ID}"
+internal_ssl_verify: ${ANCHORE_INTERNAL_SSL_VERIFY}
+image_analyze_timeout_seconds: ${ANCHORE_IMAGE_ANALYZE_TIMEOUT_SECONDS}
+
+global_client_connect_timeout: ${ANCHORE_GLOBAL_CLIENT_CONNECT_TIMEOUT}
+global_client_read_timeout: ${ANCHORE_GLOBAL_CLIENT_READ_TIMEOUT}
+server_request_timeout_seconds: ${ANCHORE_GLOBAL_SERVER_REQUEST_TIMEOUT_SEC}
+
+license_file: ${ANCHORE_LICENSE_FILE}
+auto_restart_services: false
+
+max_source_import_size_mb: ${ANCHORE_MAX_IMPORT_SOURCE_SIZE_MB}
+max_import_content_size_mb: ${ANCHORE_MAX_IMPORT_CONTENT_SIZE_MB}
+
+max_compressed_image_size_mb: ${ANCHORE_MAX_COMPRESSED_IMAGE_SIZE_MB}
+
+metrics:
+  enabled: ${ANCHORE_ENABLE_METRICS}
+  auth_disabled: ${ANCHORE_DISABLE_METRICS_AUTH}
+
+webhooks: {{- toYaml .Values.anchoreConfig.webhooks | nindent 2 }}
+
+default_admin_password: "${ANCHORE_ADMIN_PASSWORD}"
+default_admin_email: ${ANCHORE_ADMIN_EMAIL}
+
+keys:
+  secret: "${ANCHORE_SAML_SECRET}"
+  public_key_path: ${ANCHORE_AUTH_PUBKEY}
+  private_key_path: ${ANCHORE_AUTH_PRIVKEY}
+
+user_authentication:
+  oauth:
+    enabled: ${ANCHORE_OAUTH_ENABLED}
+    default_token_expiration_seconds: ${ANCHORE_OAUTH_TOKEN_EXPIRATION}
+    refresh_token_expiration_seconds: ${ANCHORE_OAUTH_REFRESH_TOKEN_EXPIRATION}
+  hashed_passwords: ${ANCHORE_AUTH_ENABLE_HASHED_PASSWORDS}
+  sso_require_existing_users: ${ANCHORE_SSO_REQUIRES_EXISTING_USERS}
+  allow_api_keys_for_saml_users: {{ .Values.anchoreConfig.user_authentication.allow_api_keys_for_saml_users }}
+  max_api_key_age_days: {{ .Values.anchoreConfig.user_authentication.max_api_key_age_days }}
+  
max_api_keys_per_user: {{ .Values.anchoreConfig.user_authentication.max_api_keys_per_user }} + remove_deleted_user_api_keys_older_than_days: {{ .Values.anchoreConfig.user_authentication.remove_deleted_user_api_keys_older_than_days }} + +credentials: + database: + user: "${ANCHORE_DB_USER}" + password: "${ANCHORE_DB_PASSWORD}" + host: "${ANCHORE_DB_HOST}" + port: "${ANCHORE_DB_PORT}" + name: "${ANCHORE_DB_NAME}" + db_connect_args: + timeout: ${ANCHORE_DB_TIMEOUT} + ssl: ${ANCHORE_DB_SSL} + {{- if .Values.anchoreConfig.database.ssl }} + sslmode: ${ANCHORE_DB_SSL_MODE} + sslrootcert: ${ANCHORE_DB_SSL_ROOT_CERT} + {{- end }} + db_pool_size: ${ANCHORE_DB_POOL_SIZE} + db_pool_max_overflow: ${ANCHORE_DB_POOL_MAX_OVERFLOW} + {{- with .Values.anchoreConfig.database.engineArgs }} + db_engine_args: {{- toYaml . | nindent 6 }} + {{- end }} + +services: + apiext: + enabled: true + require_auth: true + endpoint_hostname: ${ANCHORE_ENDPOINT_HOSTNAME} + max_request_threads: ${ANCHORE_MAX_REQUEST_THREADS} + listen: '0.0.0.0' + port: ${ANCHORE_PORT} + {{- if .Values.anchoreConfig.apiext.external.enabled }} + external_tls: {{ .Values.anchoreConfig.apiext.external.useTLS }} + external_hostname: {{ .Values.anchoreConfig.apiext.external.hostname }} + external_port: {{ .Values.anchoreConfig.apiext.external.port }} + {{- end }} + ssl_enable: ${ANCHORE_SSL_ENABLED} + ssl_cert: ${ANCHORE_SSL_CERT} + ssl_key: ${ANCHORE_SSL_KEY} + + analyzer: + enabled: true + require_auth: true + endpoint_hostname: ${ANCHORE_ENDPOINT_HOSTNAME} + listen: '0.0.0.0' + port: ${ANCHORE_PORT} + max_request_threads: ${ANCHORE_MAX_REQUEST_THREADS} + cycle_timer_seconds: 1 + cycle_timers: {{- toYaml .Values.anchoreConfig.analyzer.cycle_timers | nindent 6 }} + analyzer_driver: 'nodocker' + layer_cache_enable: ${ANCHORE_LAYER_CACHE_ENABLED} + layer_cache_max_gigabytes: ${ANCHORE_LAYER_CACHE_SIZE_GB} + enable_hints: ${ANCHORE_HINTS_ENABLED} + enable_owned_package_filtering: ${ANCHORE_OWNED_PACKAGE_FILTERING_ENABLED} + 
keep_image_analysis_tmpfiles: ${ANCHORE_KEEP_IMAGE_ANALYSIS_TMPFILES} + ssl_enable: ${ANCHORE_SSL_ENABLED} + ssl_cert: ${ANCHORE_SSL_CERT} + ssl_key: ${ANCHORE_SSL_KEY} + + catalog: + enabled: true + require_auth: true + endpoint_hostname: ${ANCHORE_ENDPOINT_HOSTNAME} + listen: '0.0.0.0' + port: ${ANCHORE_PORT} + max_request_threads: ${ANCHORE_MAX_REQUEST_THREADS} + cycle_timer_seconds: 1 + cycle_timers: {{- toYaml .Values.anchoreConfig.catalog.cycle_timers | nindent 6 }} + event_log: {{- toYaml .Values.anchoreConfig.catalog.event_log | nindent 6 }} + runtime_inventory: + inventory_ttl_days: ${ANCHORE_ENTERPRISE_RUNTIME_INVENTORY_TTL_DAYS} + inventory_ingest_overwrite: ${ANCHORE_ENTERPRISE_RUNTIME_INVENTORY_INGEST_OVERWRITE} + image_gc: + max_worker_threads: ${ANCHORE_CATALOG_IMAGE_GC_WORKERS} + runtime_compliance: + object_store_bucket: "runtime_compliance_check" + down_analyzer_task_requeue: ${ANCHORE_ANALYZER_TASK_REQUEUE} + import_operation_expiration_days: ${ANCHORE_IMPORT_OPERATION_EXPIRATION_DAYS} + analysis_archive: {{- toYaml .Values.anchoreConfig.catalog.analysis_archive | nindent 6 }} + object_store: {{- toYaml .Values.anchoreConfig.catalog.object_store | nindent 6 }} + ssl_enable: ${ANCHORE_SSL_ENABLED} + ssl_cert: ${ANCHORE_SSL_CERT} + ssl_key: ${ANCHORE_SSL_KEY} + + simplequeue: + enabled: true + require_auth: true + endpoint_hostname: ${ANCHORE_ENDPOINT_HOSTNAME} + listen: '0.0.0.0' + port: ${ANCHORE_PORT} + max_request_threads: ${ANCHORE_MAX_REQUEST_THREADS} + ssl_enable: ${ANCHORE_SSL_ENABLED} + ssl_cert: ${ANCHORE_SSL_CERT} + ssl_key: ${ANCHORE_SSL_KEY} + + policy_engine: + enabled: true + require_auth: true + max_request_threads: ${ANCHORE_MAX_REQUEST_THREADS} + endpoint_hostname: ${ANCHORE_ENDPOINT_HOSTNAME} + listen: '0.0.0.0' + port: ${ANCHORE_PORT} + policy_evaluation_cache_ttl: ${ANCHORE_POLICY_EVAL_CACHE_TTL_SECONDS} + cycle_timer_seconds: 1 + cycle_timers: {{- toYaml .Values.anchoreConfig.policy_engine.cycle_timers | nindent 6 }} + 
enable_package_db_load: ${ANCHORE_POLICY_ENGINE_ENABLE_PACKAGE_DB_LOAD} + vulnerabilities: + sync: + enabled: true + ssl_verify: ${ANCHORE_FEEDS_SSL_VERIFY} + connection_timeout_seconds: 3 + read_timeout_seconds: 60 + data: + grypedb: + enabled: true + url: {{ template "enterprise.grypeProviderURL" . }} + packages: + enabled: ${ANCHORE_FEEDS_DRIVER_PACKAGES_ENABLED} + url: {{ template "enterprise.feedsURL" . }} + matching: + default: + search: + by_cpe: + enabled: ${ANCHORE_VULN_MATCHING_DEFAULT_SEARCH_BY_CPE_ENABLED} + ecosystem_specific: + dotnet: + search: + by_cpe: + enabled: ${ANCHORE_VULN_MATCHING_ECOSYSTEM_SPECIFIC_DOTNET_SEARCH_BY_CPE_ENABLED} + golang: + search: + by_cpe: + enabled: ${ANCHORE_VULN_MATCHING_ECOSYSTEM_SPECIFIC_GOLANG_SEARCH_BY_CPE_ENABLED} + java: + search: + by_cpe: + enabled: ${ANCHORE_VULN_MATCHING_ECOSYSTEM_SPECIFIC_JAVA_SEARCH_BY_CPE_ENABLED} + javascript: + search: + by_cpe: + enabled: ${ANCHORE_VULN_MATCHING_ECOSYSTEM_SPECIFIC_JAVASCRIPT_SEARCH_BY_CPE_ENABLED} + python: + search: + by_cpe: + enabled: ${ANCHORE_VULN_MATCHING_ECOSYSTEM_SPECIFIC_PYTHON_SEARCH_BY_CPE_ENABLED} + ruby: + search: + by_cpe: + enabled: ${ANCHORE_VULN_MATCHING_ECOSYSTEM_SPECIFIC_RUBY_SEARCH_BY_CPE_ENABLED} + stock: + search: + by_cpe: + # Disabling search by CPE for the stock matcher will entirely disable binary-only matches and is not advised + enabled: ${ANCHORE_VULN_MATCHING_ECOSYSTEM_SPECIFIC_STOCK_SEARCH_BY_CPE_ENABLED} + ssl_enable: ${ANCHORE_SSL_ENABLED} + ssl_cert: ${ANCHORE_SSL_CERT} + ssl_key: ${ANCHORE_SSL_KEY} + + reports: + enabled: true + require_auth: true + endpoint_hostname: ${ANCHORE_ENDPOINT_HOSTNAME} + listen: '0.0.0.0' + port: ${ANCHORE_PORT} + max_request_threads: ${ANCHORE_MAX_REQUEST_THREADS} + enable_graphiql: ${ANCHORE_ENTERPRISE_REPORTS_ENABLE_GRAPHIQL} + cycle_timers: {{- toYaml .Values.anchoreConfig.reports.cycle_timers | nindent 6 }} + max_async_execution_threads: ${ANCHORE_ENTERPRISE_REPORTS_MAX_ASYNC_EXECUTION_THREADS} + 
async_execution_timeout: ${ANCHORE_ENTERPRISE_REPORTS_ASYNC_EXECUTION_TIMEOUT} + ssl_enable: ${ANCHORE_SSL_ENABLED} + ssl_cert: ${ANCHORE_SSL_CERT} + ssl_key: ${ANCHORE_SSL_KEY} + use_volume: {{ .Values.anchoreConfig.reports.use_volume }} + + reports_worker: + enabled: true + require_auth: true + endpoint_hostname: ${ANCHORE_ENDPOINT_HOSTNAME} + listen: '0.0.0.0' + port: ${ANCHORE_PORT} + max_request_threads: ${ANCHORE_MAX_REQUEST_THREADS} + enable_data_ingress: ${ANCHORE_ENTERPRISE_REPORTS_ENABLE_DATA_INGRESS} + enable_data_egress: ${ANCHORE_ENTERPRISE_REPORTS_ENABLE_DATA_EGRESS} + data_egress_window: ${ANCHORE_ENTERPRISE_REPORTS_DATA_EGRESS_WINDOW} + data_refresh_max_workers: ${ANCHORE_ENTERPRISE_REPORTS_DATA_REFRESH_MAX_WORKERS} + data_load_max_workers: ${ANCHORE_ENTERPRISE_REPORTS_DATA_LOAD_MAX_WORKERS} + cycle_timers: {{- toYaml .Values.anchoreConfig.reports_worker.cycle_timers | nindent 6 }} + runtime_report_generation: + inventory_images_by_vulnerability: true + vulnerabilities_by_k8s_namespace: ${ANCHORE_ENTERPRISE_REPORTS_VULNERABILITIES_BY_K8S_NAMESPACE} + vulnerabilities_by_k8s_container: ${ANCHORE_ENTERPRISE_REPORTS_VULNERABILITIES_BY_K8S_CONTAINER} + vulnerabilities_by_ecs_container: ${ANCHORE_ENTERPRISE_REPORTS_VULNERABILITIES_BY_ECS_CONTAINER} + ssl_enable: ${ANCHORE_SSL_ENABLED} + ssl_cert: ${ANCHORE_SSL_CERT} + ssl_key: ${ANCHORE_SSL_KEY} + + notifications: + enabled: true + require_auth: true + endpoint_hostname: ${ANCHORE_ENDPOINT_HOSTNAME} + listen: '0.0.0.0' + port: ${ANCHORE_PORT} + max_request_threads: ${ANCHORE_MAX_REQUEST_THREADS} + cycle_timers: {{- toYaml .Values.anchoreConfig.notifications.cycle_timers | nindent 6 }} + ui_url: ${ANCHORE_ENTERPRISE_UI_URL} + ssl_enable: ${ANCHORE_SSL_ENABLED} + ssl_cert: ${ANCHORE_SSL_CERT} + ssl_key: ${ANCHORE_SSL_KEY} diff --git a/stable/enterprise/templates/NOTES.txt b/stable/enterprise/templates/NOTES.txt new file mode 100644 index 00000000..15503e39 --- /dev/null +++ 
b/stable/enterprise/templates/NOTES.txt
@@ -0,0 +1,33 @@
+To use Anchore you need the URL, username, and password to access the API and/or the UI.
+
+The Anchore API can be accessed via port {{ .Values.api.service.port }} on the following DNS name from within the cluster:
+
+    {{ include "enterprise.api.fullname" . -}}.{{- .Release.Namespace -}}.svc.cluster.local
+
+The Anchore UI can be accessed via localhost:8080 with kubernetes port-forwarding:
+
+    kubectl port-forward svc/{{- template "enterprise.ui.fullname" . }} 8080:{{- .Values.ui.service.port }}
+
+Get the default admin password using the following command:
+
+    kubectl get secret {{ template "enterprise.fullname" . }} -o jsonpath='{.data.ANCHORE_ADMIN_PASSWORD}' | base64 --decode
+
+* NOTE: On first startup of Anchore Enterprise, the policy-engine performs a CVE data sync which may take several minutes to complete.
+During this time the system status will report 'partially_down' and any images added for analysis will stay in the 'not_analyzed' state.
+Once the sync is complete, any queued images will be analyzed and the system status will change to 'all_up'.
+
+Initial setup time can be >120sec for postgresql setup and readiness checks to pass for the services as indicated by pod state.
+You can check with:
+
+    kubectl get pods -l app.kubernetes.io/name={{- template "enterprise.fullname" . -}},app.kubernetes.io/component=api
+
+{{ if and .Values.useExistingSecrets .Release.IsUpgrade (semverCompare "~2.1.0" .Chart.Version) }}
+******************
+
+WARNING: This deployment is utilizing Existing Secrets. Your ANCHORE_REDIS_URI environment variable needs to be updated.
+A username is no longer required, please remove `nouser` from the URI. The UI will not function unless this update is performed.
+ +For more details see the v2.1.0 release notes - https://github.com/anchore/anchore-charts/blob/main/stable/enterprise/README.md#v210 + +****************** +{{ end }} diff --git a/stable/enterprise/templates/_common.tpl b/stable/enterprise/templates/_common.tpl new file mode 100644 index 00000000..224ee43e --- /dev/null +++ b/stable/enterprise/templates/_common.tpl @@ -0,0 +1,325 @@ +{{/* +Common annotations +When calling this template, .component can be included in the context for component specific annotations +{{- include "enterprise.common.annotations" (merge (dict "component" $component) .) }} +*/}} +{{- define "enterprise.common.annotations" -}} +{{- $component := .component -}} +{{- if and (not .nil) (not .Values.annotations) (not (index .Values (print $component)).annotations) }} + {{- print "{}" }} +{{- else }} + {{- with .Values.annotations }} +{{ toYaml . }} + {{- end }} + {{- if $component }} + {{- with (index .Values (print $component)).annotations }} +{{ toYaml . }} + {{- end }} + {{- end }} +{{- end }} +{{- end -}} + + +{{/* +Setup a container for the cloudsql proxy to run in all pods when .Values.cloudsql.enabled = true +*/}} +{{- define "enterprise.common.cloudsqlContainer" -}} +- name: cloudsql-proxy + image: {{ .Values.cloudsql.image }} + imagePullPolicy: {{ .Values.cloudsql.imagePullPolicy }} +{{- with .Values.containerSecurityContext }} + securityContext: + {{ toYaml . 
| nindent 4 }} +{{- end }} + command: ["/cloud_sql_proxy"] + args: + - "-instances={{ .Values.cloudsql.instance }}=tcp:5432" + {{- if .Values.cloudsql.extraArgs }} + {{- range $arg := .Values.cloudsql.extraArgs }} + - {{ quote $arg }} + {{- end }} + {{- end }} + {{- if .Values.cloudsql.useExistingServiceAcc }} + - "-credential_file=/var/{{ .Values.cloudsql.serviceAccSecretName }}/{{ .Values.cloudsql.serviceAccJsonName }}" + volumeMounts: + - mountPath: "/var/{{ .Values.cloudsql.serviceAccSecretName }}" + name: {{ .Values.cloudsql.serviceAccSecretName }} + readOnly: true +{{- end }} +{{- end -}} + + +{{/* +Setup the common docker-entrypoint command for all Anchore Enterprise containers +*/}} +{{- define "enterprise.common.dockerEntrypoint" -}} +{{ print (include "enterprise.doSourceFile" .) }} /docker-entrypoint.sh anchore-enterprise-manager service start --no-auto-upgrade +{{- end -}} + + +{{/* +Setup the common envFrom configs +*/}} +{{- define "enterprise.common.envFrom" -}} +- configMapRef: + name: {{ .Release.Name }}-enterprise-config-env-vars +{{- if not .Values.injectSecretsViaEnv }} + {{- if .Values.useExistingSecrets }} +- secretRef: + name: {{ .Values.existingSecretName }} + {{- else }} +- secretRef: + name: {{ template "enterprise.fullname" . }} + {{- end }} +{{- end }} +{{- end -}} + + +{{/* +Common environment variables +When calling this template, .component can be included in the context for component specific env vars +{{- include "enterprise.common.environment" (merge (dict "component" $component) .) }} +*/}} +{{- define "enterprise.common.environment" -}} +{{- $component := .component -}} +{{- with .Values.extraEnv }} +{{ toYaml . }} +{{- end }} +{{- if $component }} + {{- with (index .Values (print $component)).extraEnv }} +{{ toYaml . }} + {{- end }} +- name: ANCHORE_ENDPOINT_HOSTNAME + value: {{ include (printf "enterprise.%s.fullname" $component) . 
}} + {{- with (index .Values (print $component)).service }} +- name: ANCHORE_PORT + value: {{ .port | quote }} + {{- else }} +- name: ANCHORE_PORT + value: "null" + {{- end }} +{{- end }} +- name: ANCHORE_HOST_ID + valueFrom: + fieldRef: + fieldPath: metadata.name +{{- end -}} + + +{{/* +Common extraVolumes +When calling this template, .component can be included in the context for component specific annotations +{{- include "enterprise.common.extraVolumes" (merge (dict "component" $component) .) }} +*/}} +{{- define "enterprise.common.extraVolumes" -}} +{{- $component := .component -}} +{{- with .Values.extraVolumes }} +{{ toYaml . }} +{{- end }} +{{- if $component }} + {{- with (index .Values (print $component)).extraVolumes }} +{{ toYaml . }} + {{- end }} +{{- end }} +{{- end -}} + + +{{/* +Common extraVolumeMounts +When calling this template, .component can be included in the context for component specific annotations +{{- include "enterprise.common.extraVolumes" (merge (dict "component" $component) .) }} +*/}} +{{- define "enterprise.common.extraVolumeMounts" -}} +{{- $component := .component -}} +{{- with .Values.extraVolumeMounts }} +{{ toYaml . }} +{{- end }} +{{- if $component }} + {{- with (index .Values (print $component)).extraVolumeMounts }} +{{ toYaml . 
}} + {{- end }} +{{- end }} +{{- end -}} + + +{{/* +Setup the common fix permissions init container for all pods using a scratch volume +*/}} +{{- define "enterprise.common.fixPermissionsInitContainer" -}} +- name: mode-fixer + image: {{ .Values.scratchVolume.fixerInitContainerImage }} + securityContext: + runAsUser: 0 + volumeMounts: + - name: "anchore-scratch" + mountPath: {{ .Values.scratchVolume.mountPath }} + command: + - sh + - -c + - (chmod 0775 {{ .Values.scratchVolume.mountPath }}; chgrp {{ .Values.securityContext.fsGroup }} {{ .Values.scratchVolume.mountPath }} ) +{{- end -}} + + +{{/* +Common labels +When calling this template, .component can be included in the context for component specific labels +{{- include "enterprise.common.labels" (merge (dict "component" $component) .) }} +*/}} +{{- define "enterprise.common.labels" -}} +{{- $component := .component -}} +{{- if $component }} + {{- with (index .Values (print $component)).labels }} +{{ toYaml . }} + {{- end }} +{{- end }} +{{- with .Values.labels }} +{{ toYaml . }} +{{- end }} +app.kubernetes.io/name: {{ template "enterprise.fullname" . }} + {{- with $component }} +app.kubernetes.io/component: {{ . | lower }} + {{- end }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/version: {{ .Chart.AppVersion }} +app.kubernetes.io/part-of: anchore +app.kubernetes.io/managed-by: {{ .Release.Service }} +helm.sh/chart: {{ printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end -}} + + +{{/* +Setup the common liveness probes for all Anchore Enterprise containers +*/}} +{{- define "enterprise.common.livenessProbe" -}} +{{- $component := .component -}} +httpGet: + path: /health + port: {{ $component | lower }} + scheme: {{ include "enterprise.setProtocol" . 
| upper }} +initialDelaySeconds: {{ .Values.probes.liveness.initialDelaySeconds }} +timeoutSeconds: {{ .Values.probes.liveness.timeoutSeconds }} +periodSeconds: {{ .Values.probes.liveness.periodSeconds }} +failureThreshold: {{ .Values.probes.liveness.failureThreshold }} +successThreshold: {{ .Values.probes.liveness.successThreshold }} +{{- end -}} + + +{{/* +Return anchore default selector match labels +When calling this template, .component can be included in the context for component specific env vars +{{- include "enterprise.common.matchLabels" (merge (dict "component" $component) .) }} +*/}} +{{- define "enterprise.common.matchLabels" -}} +{{- $component := .component -}} +app.kubernetes.io/name: {{ template "enterprise.fullname" . }} +app.kubernetes.io/component: {{ $component | lower }} +{{- end -}} + + +{{/* +Setup the common pod spec configs +*/}} +{{- define "enterprise.common.podSpec" -}} +{{- $component := .component -}} +{{- with .Values.securityContext }} +securityContext: {{- toYaml . | nindent 2 }} +{{- end }} +{{- if or .Values.serviceAccountName (index .Values (print $component)).serviceAccountName (eq $component "upgradeJob") }} +serviceAccountName: {{ include "enterprise.serviceAccountName" (merge (dict "component" $component) .) }} +{{- end }} +{{- with .Values.imagePullSecretName }} +imagePullSecrets: + - name: {{ . }} +{{- end }} +{{- with (default .Values.nodeSelector (index .Values (print $component)).nodeSelector) }} +nodeSelector: {{- toYaml . | nindent 2 }} +{{- end }} +{{- with (default .Values.affinity (index .Values (print $component)).affinity) }} +affinity: {{- toYaml . | nindent 2 }} +{{- end }} +{{- with (default .Values.tolerations (index .Values (print $component)).tolerations) }} +tolerations: {{- toYaml . 
| nindent 2 }} +{{- end }} +{{- end -}} + + +{{/* +Setup the common readiness probes for all Anchore Enterprise containers +*/}} +{{- define "enterprise.common.readinessProbe" -}} +{{- $component := .component -}} +httpGet: + path: /health + port: {{ $component | lower }} + scheme: {{ include "enterprise.setProtocol" . | upper }} +timeoutSeconds: {{ .Values.probes.readiness.timeoutSeconds }} +periodSeconds: {{ .Values.probes.readiness.periodSeconds }} +failureThreshold: {{ .Values.probes.readiness.failureThreshold }} +successThreshold: {{ .Values.probes.readiness.successThreshold }} +{{- end -}} + + +{{/* +Setup the common anchore scratch volume details config +*/}} +{{- define "enterprise.common.scratchVolume.details" -}} +{{- $component := .component -}} +{{- if (index .Values (print $component)).scratchVolume.details }} + {{- toYaml (index .Values (print $component)).scratchVolume.details }} +{{- else if .Values.scratchVolume.details }} + {{- toYaml .Values.scratchVolume.details }} +{{- else }} +emptyDir: {} +{{- end }} +{{- end -}} + + +{{/* +Setup the common anchore volume mounts +*/}} +{{- define "enterprise.common.volumeMounts" -}} +{{- $component := .component -}} +{{- include "enterprise.common.extraVolumeMounts" (merge (dict "component" $component) .) }} +- name: anchore-license + mountPath: /home/anchore/license.yaml + subPath: license.yaml +- name: config-volume + mountPath: /config/config.yaml + subPath: config.yaml +- name: anchore-scripts + mountPath: /scripts +{{- if .Values.certStoreSecretName }} +- name: certs + mountPath: /home/anchore/certs/ + readOnly: true +{{- end }} +{{- end -}} + + +{{/* +Setup the common anchore volumes +*/}} +{{- define "enterprise.common.volumes" -}} +{{- $component := .component -}} +{{- include "enterprise.common.extraVolumes" (merge (dict "component" $component) .) 
}} +- name: anchore-license + secret: + secretName: {{ .Values.licenseSecretName }} +- name: anchore-scripts + configMap: + name: {{ .Release.Name }}-enterprise-scripts + defaultMode: 0755 +- name: config-volume + configMap: + name: {{ template "enterprise.fullname" . }} +{{- with .Values.certStoreSecretName }} +- name: certs + secret: + secretName: {{ . }} +{{- end }} +{{- if .Values.cloudsql.useExistingServiceAcc }} +- name: {{ .Values.cloudsql.serviceAccSecretName }} + secret: + secretName: {{ .Values.cloudsql.serviceAccSecretName }} +{{- end }} +{{- end -}} diff --git a/stable/enterprise/templates/_helpers.tpl b/stable/enterprise/templates/_helpers.tpl new file mode 100644 index 00000000..b57dde0a --- /dev/null +++ b/stable/enterprise/templates/_helpers.tpl @@ -0,0 +1,156 @@ +{{/* +Create database hostname string from supplied values file. Used for setting the ANCHORE_DB_HOST env var in the UI & Engine secret. +*/}} +{{- define "enterprise.dbHostname" }} + {{- if and (index .Values "postgresql" "externalEndpoint") (not (index .Values "postgresql" "enabled")) }} + {{- print ( index .Values "postgresql" "externalEndpoint" ) }} + {{- else if and (index .Values "cloudsql" "enabled") (not (index .Values "postgresql" "enabled")) }} + {{- print "localhost" }} + {{- else }} + {{- $db_host := include "postgres.fullname" . 
}} + {{- printf "%s" $db_host -}} + {{- end }} +{{- end }} + + +{{/* +Return Anchore default admin password +*/}} +{{- define "enterprise.defaultAdminPassword" -}} + {{- if .Values.anchoreConfig.default_admin_password }} + {{- .Values.anchoreConfig.default_admin_password -}} + {{- else -}} + {{- randAlphaNum 32 -}} + {{- end -}} +{{- end -}} + +{{/* +Return Anchore SAML SECRET +*/}} +{{- define "enterprise.samlSecret" -}} + {{- if .Values.anchoreConfig.keys.secret }} + {{- .Values.anchoreConfig.keys.secret -}} + {{- else -}} + {{- randAlphaNum 32 -}} + {{- end -}} +{{- end -}} + +{{/* +Allows sourcing of a specified file in the entrypoint of all containers when .Values.doSourceAtEntry.enabled == true +*/}} +{{- define "enterprise.doSourceFile" }} + {{- if .Values.doSourceAtEntry.enabled }} + {{- range $index, $file := .Values.doSourceAtEntry.filePaths }} + {{- printf "if [ -f %v ];then source %v;fi;" $file $file }} + {{- end }} + {{- end }} +{{- end }} + + +{{/* +Returns the proper URL for the feeds service +*/}} +{{- define "enterprise.feedsURL" }} +{{- $anchoreFeedsURL := "" }} + {{- if .Values.feeds.url }} + {{- /* remove everything from the URL after /v2 to get the hostname, then use that to construct the proper URL */}} + {{- $regexSearchPattern := (printf "/v2.*$" | toString) }} + {{- $urlPathSuffix := (default "" (regexFind $regexSearchPattern .Values.feeds.url) ) }} + {{- $anchoreFeedsHost := (trimSuffix $urlPathSuffix .Values.feeds.url) -}} + {{- $anchoreFeedsURL = (printf "%s/v2/feeds" $anchoreFeedsHost) -}} + {{- else if .Values.feeds.chartEnabled }} + {{- $anchoreFeedsURL = (printf "%s://%s:%s/v2/feeds" (include "enterprise.feeds.setProtocol" .) (include "enterprise.feeds.fullname" .) 
(.Values.feeds.service.port | toString)) -}} + {{- end }} + {{- print $anchoreFeedsURL -}} +{{- end -}} + + +{{/* +Returns the proper URL for the grype provider +*/}} +{{- define "enterprise.grypeProviderURL" }} +{{- $grypeProviderFeedsExternalURL := "" -}} +{{- $regexSearchPattern := (printf "/v2.*$" | toString) }} + {{- if .Values.feeds.url }} + {{- /* remove everything from the URL after /v2 to get the hostname, then use that to construct the proper URL */}} + {{- $urlPathSuffix := (default "" ( regexFind $regexSearchPattern .Values.feeds.url )) -}} + {{- $anchoreFeedsHost := (trimSuffix $urlPathSuffix .Values.feeds.url) -}} + {{- $grypeProviderFeedsExternalURL = (printf "%s/v2/databases/grypedb" $anchoreFeedsHost) -}} + {{- else if .Values.feeds.chartEnabled }} + {{- $grypeProviderFeedsExternalURL = (printf "%s://%s:%s/v2/databases/grypedb" (include "enterprise.feeds.setProtocol" .) (include "enterprise.feeds.fullname" .) (.Values.feeds.service.port | toString)) -}} + {{- end }} + + {{- /* Set the grypeProviderFeedsExternalURL to upstream feeds if still unset or if specifically overridden */}} + {{- if or (empty $grypeProviderFeedsExternalURL) .Values.anchoreConfig.policy_engine.overrideFeedsToUpstream -}} + {{- $grypeProviderFeedsExternalURL = "https://toolbox-data.anchore.io/grype/databases/listing.json" -}} + {{- end }} + {{- print $grypeProviderFeedsExternalURL -}} +{{- end -}} + + +{{/* +Set the appropriate kubernetes service account name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "enterprise.serviceAccountName" -}} +{{- $component := .component -}} +{{- with (index .Values (print $component)).serviceAccountName }} + {{- print . | trunc 63 | trimSuffix "-" -}} +{{- else }} + {{- if and .Values.upgradeJob.rbacCreate (eq $component "upgradeJob") }} + {{- printf "%s-%s" (include "enterprise.fullname" .) 
"upgrade-sa" -}} + {{- else if .Values.serviceAccountName }} + {{- print .Values.serviceAccountName | trunc 63 | trimSuffix "-" -}} + {{- end }} +{{- end }} +{{- end -}} + + +{{/* +Return the proper protocol when Anchore internal SSL is enabled +*/}} +{{- define "enterprise.setProtocol" -}} + {{- if .Values.anchoreConfig.internalServicesSSL.enabled }} +{{- print "https" -}} + {{- else -}} +{{- print "http" -}} + {{- end }} +{{- end -}} + + +{{/* +Return the proper protocol when Anchore internal SSL is enabled +*/}} +{{- define "enterprise.feeds.setProtocol" -}} + {{- if .Values.feeds.anchoreConfig.internalServicesSSL.enabled }} +{{- print "https" -}} + {{- else -}} +{{- print "http" -}} + {{- end }} +{{- end -}} + + +{{/* +Return the database password for the Anchore Enterprise UI config +*/}} +{{- define "enterprise.ui.dbPassword" -}} +{{ ternary .Values.postgresql.auth.password .Values.anchoreConfig.ui.dbPassword (empty .Values.anchoreConfig.ui.dbPassword) }} +{{- end -}} + + +{{/* +Return the database user for the Anchore Enterprise UI config +*/}} +{{- define "enterprise.ui.dbUser" -}} +{{ ternary .Values.postgresql.auth.username .Values.anchoreConfig.ui.dbUser (empty .Values.anchoreConfig.ui.dbUser) }} +{{- end -}} + +{{/* +Set the nodePort for services if its defined +*/}} +{{- define "enterprise.service.nodePort" -}} +{{- $component := .component -}} +{{- if (index .Values (print $component)).service.nodePort -}} +nodePort: {{ (index .Values (print $component)).service.nodePort }} +{{- end -}} +{{- end -}} diff --git a/stable/enterprise/templates/_names.tpl b/stable/enterprise/templates/_names.tpl new file mode 100644 index 00000000..ec057737 --- /dev/null +++ b/stable/enterprise/templates/_names.tpl @@ -0,0 +1,84 @@ +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+*/}} + +{{- define "enterprise.fullname" -}} +{{- if .Values.global.fullnameOverride }} + {{- .Values.global.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} + {{- $name := default .Chart.Name .Values.global.nameOverride }} + {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end -}} + +{{- define "enterprise.analyzer.fullname" -}} +{{- $name := default .Chart.Name .Values.global.nameOverride -}} +{{- printf "%s-%s-%s" .Release.Name $name "analyzer"| trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "enterprise.api.fullname" -}} +{{- $name := default .Chart.Name .Values.global.nameOverride -}} +{{- printf "%s-%s-%s" .Release.Name $name "api"| trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "enterprise.catalog.fullname" -}} +{{- $name := default .Chart.Name .Values.global.nameOverride -}} +{{- printf "%s-%s-%s" .Release.Name $name "catalog"| trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "enterprise.notifications.fullname" -}} +{{- $name := default .Chart.Name .Values.global.nameOverride -}} +{{- printf "%s-%s-%s" .Release.Name $name "notifications"| trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "enterprise.policyEngine.fullname" -}} +{{- $name := default .Chart.Name .Values.global.nameOverride -}} +{{- printf "%s-%s-%s" .Release.Name $name "policy"| trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "enterprise.reports.fullname" -}} +{{- $name := default .Chart.Name .Values.global.nameOverride -}} +{{- printf "%s-%s-%s" .Release.Name $name "reports"| trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "enterprise.reportsWorker.fullname" -}} +{{- $name := default .Chart.Name .Values.global.nameOverride -}} +{{- printf "%s-%s-%s" .Release.Name $name "reportsworker"| trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "enterprise.simpleQueue.fullname" -}} +{{- $name := default .Chart.Name .Values.global.nameOverride -}} +{{- printf "%s-%s-%s" .Release.Name $name 
"simplequeue"| trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "enterprise.ui.fullname" -}} +{{- $name := default .Chart.Name .Values.global.nameOverride -}} +{{- printf "%s-%s-%s" .Release.Name $name "ui"| trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "enterprise.upgradeJob.fullname" -}} +{{- $name := default .Chart.Name .Values.global.nameOverride -}} +{{- $forcedRevision := "" -}} +{{- if .Values.upgradeJob.force }} +{{- $forcedRevision = printf "-forced-%s" (randAlphaNum 5 | lower) -}} +{{- end }} +{{- printf "%s-%s-%s-%s%s" .Release.Name $name (.Chart.AppVersion | replace "." "") "upgrade" $forcedRevision| trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "enterprise.feeds.fullname" -}} +{{- if .Values.feeds.fullnameOverride }} + {{- .Values.feeds.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} + {{- $name := default "feeds" .Values.feeds.nameOverride -}} + {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end -}} + +{{- define "postgres.fullname" -}} +{{- printf "%s-%s" .Release.Name "postgresql" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "redis.fullname" -}} +{{- printf "%s-%s" .Release.Name "ui-redis" | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/stable/enterprise/templates/analyzer_configmap.yaml b/stable/enterprise/templates/analyzer_configmap.yaml new file mode 100644 index 00000000..faab4b66 --- /dev/null +++ b/stable/enterprise/templates/analyzer_configmap.yaml @@ -0,0 +1,15 @@ +{{- $component := "analyzer" -}} + +kind: ConfigMap +apiVersion: v1 +metadata: + name: {{ template "enterprise.analyzer.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "enterprise.common.labels" (merge (dict "component" $component) .) | nindent 4 }} + annotations: {{- include "enterprise.common.annotations" (merge (dict "component" $component) .) 
| nindent 4 }} +data: + analyzer_config.yaml: | + # Anchore analyzer configuration + {{- with .Values.anchoreConfig.analyzer.configFile }} + {{- toYaml . | nindent 4 }} + {{- end }} diff --git a/stable/enterprise/templates/analyzer_deployment.yaml b/stable/enterprise/templates/analyzer_deployment.yaml new file mode 100644 index 00000000..88779220 --- /dev/null +++ b/stable/enterprise/templates/analyzer_deployment.yaml @@ -0,0 +1,66 @@ +{{- $component := "analyzer" -}} + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "enterprise.analyzer.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "enterprise.common.labels" (merge (dict "component" $component) .) | nindent 4 }} + annotations: {{- include "enterprise.common.annotations" (merge (dict "component" $component) .) | nindent 4 }} +spec: + selector: + matchLabels: {{- include "enterprise.common.matchLabels" (merge (dict "component" $component) .) | nindent 6 }} + replicas: {{ .Values.analyzer.replicaCount }} + strategy: + type: Recreate + template: + metadata: + labels: {{- include "enterprise.common.labels" (merge (dict "component" $component) .) | nindent 8 }} + annotations: {{- include "enterprise.common.annotations" (merge (dict "component" $component "nil" true) .) | nindent 8 }} + {{- if and (not .Values.injectSecretsViaEnv) (not .Values.useExistingSecrets) }} + checksum/secrets: {{ include (print $.Template.BasePath "/anchore_secret.yaml") . | sha256sum }} + {{- end }} + checksum/enterprise-config: {{ include (print $.Template.BasePath "/anchore_configmap.yaml") . | sha256sum }} + checksum/analyzer-config: {{ include (print $.Template.BasePath "/analyzer_configmap.yaml") . | sha256sum }} + spec: + {{- include "enterprise.common.podSpec" (merge (dict "component" $component) .) | indent 6 }} + volumes: {{- include "enterprise.common.volumes" (merge (dict "component" $component) .) 
| nindent 8 }} + - name: "anchore-scratch" + {{- include "enterprise.common.scratchVolume.details" (merge (dict "component" $component) .) | nindent 10 }} + - name: analyzer-config-volume + configMap: + name: {{ template "enterprise.analyzer.fullname" . }} + {{- if and .Values.scratchVolume.fixGroupPermissions .Values.securityContext.fsGroup }} + initContainers: + {{- include "enterprise.common.fixPermissionsInitContainer" . | nindent 8 }} + {{- end }} + containers: + {{- if .Values.cloudsql.enabled }} + {{- include "enterprise.common.cloudsqlContainer" . | nindent 8 }} + {{- end }} + - name: "{{ .Chart.Name }}-{{ $component | lower }}" + image: {{ .Values.image }} + imagePullPolicy: {{ .Values.imagePullPolicy }} + {{- with .Values.containerSecurityContext }} + securityContext: + {{ toYaml . | nindent 12 }} + {{- end }} + command: ["/bin/sh", "-c"] + args: + - {{ print (include "enterprise.common.dockerEntrypoint" .) }} analyzer + envFrom: {{- include "enterprise.common.envFrom" . | nindent 12 }} + env: {{- include "enterprise.common.environment" (merge (dict "component" $component) .) | nindent 12 }} + ports: + - name: {{ $component | lower }} + containerPort: {{ .Values.analyzer.service.port }} + volumeMounts: {{- include "enterprise.common.volumeMounts" (merge (dict "component" $component) .) | nindent 12 }} + - name: analyzer-config-volume + mountPath: "{{ .Values.anchoreConfig.service_dir }}/analyzer_config.yaml" + subPath: analyzer_config.yaml + - name: "anchore-scratch" + mountPath: {{ .Values.scratchVolume.mountPath }} + livenessProbe: {{- include "enterprise.common.livenessProbe" (merge (dict "component" $component) .) | nindent 12 }} + readinessProbe: {{- include "enterprise.common.readinessProbe" (merge (dict "component" $component) .) | nindent 12 }} + {{- with .Values.analyzer.resources }} + resources: {{- toYaml . 
| nindent 12 }} + {{- end }} diff --git a/stable/enterprise/templates/anchore_configmap.yaml b/stable/enterprise/templates/anchore_configmap.yaml new file mode 100644 index 00000000..f8683e25 --- /dev/null +++ b/stable/enterprise/templates/anchore_configmap.yaml @@ -0,0 +1,17 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: {{ template "enterprise.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "enterprise.common.labels" . | nindent 4 }} + annotations: {{- include "enterprise.common.annotations" . | nindent 4 }} + +data: + config.yaml: | + # Anchore Service Configuration File, mounted from a configmap + # +{{- if .Values.configOverride }} +{{ tpl .Values.configOverride . | indent 4 }} +{{- else }} +{{ tpl (.Files.Get "files/default_config.yaml") . | indent 4 }} +{{- end }} diff --git a/stable/enterprise/templates/anchore_secret.yaml b/stable/enterprise/templates/anchore_secret.yaml new file mode 100644 index 00000000..5556b3a5 --- /dev/null +++ b/stable/enterprise/templates/anchore_secret.yaml @@ -0,0 +1,34 @@ +{{- if not .Values.useExistingSecrets -}} +{{- /* + If release is being upgraded, don't recreate the defaultAdminPassword or samlSecret, instead get it from the corresponding existing + secret. +*/ -}} +{{- $anchoreAdminPass := (include "enterprise.defaultAdminPassword" . | quote) -}} +{{- $anchoreSamlSecret := (include "enterprise.samlSecret" . | quote) -}} +{{- if .Release.IsUpgrade -}} + {{- $anchoreSecret := (lookup "v1" "Secret" .Release.Namespace (include "enterprise.fullname" .)) -}} + {{- if $anchoreSecret -}} + {{- $anchoreAdminPass = (index $anchoreSecret.data "ANCHORE_ADMIN_PASSWORD" | b64dec) | quote -}} + {{- $anchoreSamlSecret = (index $anchoreSecret.data "ANCHORE_SAML_SECRET" | b64dec) | quote -}} + {{- end -}} +{{- end -}} + +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "enterprise.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "enterprise.common.labels" . 
| nindent 4 }}
+  annotations: {{- include "enterprise.common.annotations" . | nindent 4 }}
+type: Opaque
+stringData:
+  ANCHORE_ADMIN_PASSWORD: {{ $anchoreAdminPass }}
+  ANCHORECTL_PASSWORD: {{ $anchoreAdminPass }}
+  ANCHORE_DB_HOST: {{ include "enterprise.dbHostname" . | quote }}
+  ANCHORE_DB_NAME: {{ index .Values "postgresql" "auth" "database" | quote }}
+  ANCHORE_DB_USER: {{ index .Values "postgresql" "auth" "username" | quote }}
+  ANCHORE_DB_PASSWORD: {{ index .Values "postgresql" "auth" "password" | quote }}
+  ANCHORE_DB_PORT: {{ index .Values "postgresql" "primary" "service" "ports" "postgresql" | quote }}
+  ANCHORE_SAML_SECRET: {{ $anchoreSamlSecret }}
+
+{{- end -}}
diff --git a/stable/enterprise/templates/api_deployment.yaml b/stable/enterprise/templates/api_deployment.yaml
new file mode 100644
index 00000000..ae7490de
--- /dev/null
+++ b/stable/enterprise/templates/api_deployment.yaml
@@ -0,0 +1,93 @@
+{{- $component := "api" -}}
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: {{ template "enterprise.api.fullname" . }}
+  namespace: {{ .Release.Namespace }}
+  labels: {{- include "enterprise.common.labels" (merge (dict "component" $component) .) | nindent 4 }}
+  annotations: {{- include "enterprise.common.annotations" (merge (dict "component" $component) .) | nindent 4 }}
+spec:
+  selector:
+    matchLabels: {{- include "enterprise.common.matchLabels" (merge (dict "component" $component) .) | nindent 6 }}
+  replicas: {{ .Values.api.replicaCount }}
+  strategy:
+    type: Recreate
+  template:
+    metadata:
+      labels: {{- include "enterprise.common.labels" (merge (dict "component" $component) .) | nindent 8 }}
+      annotations: {{- include "enterprise.common.annotations" (merge (dict "component" $component "nil" true) .) | nindent 8 }}
+      {{- /* match the analyzer/catalog deployments: skip the secret checksum when secrets
+            are injected via env OR supplied externally — in the latter case
+            anchore_secret.yaml renders empty and the checksum would be meaningless */}}
+      {{- if and (not .Values.injectSecretsViaEnv) (not .Values.useExistingSecrets) }}
+        checksum/secrets: {{ include (print $.Template.BasePath "/anchore_secret.yaml") . 
| sha256sum }} + {{- end }} + {{- if .Values.anchoreConfig.policyBundles }} + checksum/policy-config: {{ include (print $.Template.BasePath "/policybundle_configmap.yaml") . | sha256sum }} + {{- end }} + checksum/enterprise-config: {{ include (print $.Template.BasePath "/anchore_configmap.yaml") . | sha256sum }} + spec: + {{- include "enterprise.common.podSpec" (merge (dict "component" $component) .) | indent 6 }} + volumes: {{- include "enterprise.common.volumes" (merge (dict "component" $component) .) | nindent 8 }} + {{- if .Values.anchoreConfig.policyBundles }} + - name: policy-bundle-volume + configMap: + name: {{ template "enterprise.fullname" . }}-policy-bundles + {{- end }} + containers: + {{- if .Values.cloudsql.enabled }} + {{- include "enterprise.common.cloudsqlContainer" . | nindent 8 }} + {{- end }} + - name: "{{ .Chart.Name }}-{{ $component | lower }}" + image: {{ .Values.image }} + imagePullPolicy: {{ .Values.imagePullPolicy }} + {{- with .Values.containerSecurityContext }} + securityContext: + {{ toYaml . | nindent 12 }} + {{- end }} + command: ["/bin/sh", "-c"] + args: + - {{ print (include "enterprise.common.dockerEntrypoint" .) }} apiext + envFrom: {{- include "enterprise.common.envFrom" . | nindent 12 }} + env: {{- include "enterprise.common.environment" (merge (dict "component" $component) .) | nindent 12 }} + {{- if not .Values.injectSecretsViaEnv }} + - name: ANCHORE_CLI_PASS + valueFrom: + secretKeyRef: + name: {{ ternary .Values.existingSecretName (include "enterprise.fullname" .) .Values.useExistingSecrets }} + key: ANCHORE_ADMIN_PASSWORD + {{- end }} + ports: + - name: {{ $component | lower }} + containerPort: {{ .Values.api.service.port }} + volumeMounts: {{- include "enterprise.common.volumeMounts" (merge (dict "component" $component) .) 
| nindent 12 }} + {{- if .Values.anchoreConfig.policyBundles }} + {{- range $key, $value := .Values.anchoreConfig.policyBundles }} + - name: policy-bundle-volume + mountPath: {{ $.Values.anchoreConfig.service_dir }}/policies/{{ $key }} + subPath: {{ $key }} + {{- end }} + {{- end }} + livenessProbe: {{- include "enterprise.common.livenessProbe" (merge (dict "component" $component) .) | nindent 12 }} + readinessProbe: {{- include "enterprise.common.readinessProbe" (merge (dict "component" $component) .) | nindent 12 }} + {{- with .Values.api.resources }} + resources: {{- toYaml . | nindent 12 }} + {{- end }} + +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "enterprise.api.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "enterprise.common.labels" (merge (dict "component" $component) .) | nindent 4 }} + annotations: {{- include "enterprise.common.annotations" (merge (dict "component" $component) .) | nindent 4 }} +spec: + type: {{ .Values.api.service.type }} + ports: + - name: {{ $component | lower }} + port: {{ .Values.api.service.port }} + targetPort: {{ .Values.api.service.port }} + protocol: TCP + {{ include "enterprise.service.nodePort" (merge (dict "component" $component) .) }} + selector: + app.kubernetes.io/name: {{ template "enterprise.fullname" . }} + app.kubernetes.io/component: {{ $component | lower }} diff --git a/stable/enterprise/templates/catalog_deployment.yaml b/stable/enterprise/templates/catalog_deployment.yaml new file mode 100644 index 00000000..f054e7dc --- /dev/null +++ b/stable/enterprise/templates/catalog_deployment.yaml @@ -0,0 +1,94 @@ +{{- $component := "catalog" -}} + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "enterprise.catalog.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "enterprise.common.labels" (merge (dict "component" $component) .) 
| nindent 4 }} + annotations: {{- include "enterprise.common.annotations" (merge (dict "component" $component) .) | nindent 4 }} +spec: + selector: + matchLabels: {{- include "enterprise.common.matchLabels" (merge (dict "component" $component) .) | nindent 6 }} + replicas: {{ .Values.catalog.replicaCount }} + strategy: + type: Recreate + template: + metadata: + labels: {{- include "enterprise.common.labels" (merge (dict "component" $component) .) | nindent 8 }} + annotations: {{- include "enterprise.common.annotations" (merge (dict "component" $component "nil" true) .) | nindent 8 }} + checksum/enterprise-config: {{ include (print $.Template.BasePath "/anchore_configmap.yaml") . | sha256sum }} + {{- if and (not .Values.injectSecretsViaEnv) (not .Values.useExistingSecrets) }} + checksum/secrets: {{ include (print $.Template.BasePath "/anchore_secret.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.anchoreConfig.policyBundles }} + checksum/policy-config: {{ include (print $.Template.BasePath "/policybundle_configmap.yaml") . | sha256sum }} + {{- end }} + spec: + {{- include "enterprise.common.podSpec" (merge (dict "component" $component) .) | indent 6 }} + volumes: {{- include "enterprise.common.volumes" (merge (dict "component" $component) .) | nindent 8 }} + - name: anchore-scratch + {{- include "enterprise.common.scratchVolume.details" (merge (dict "component" $component) .) | nindent 10 }} + {{- if .Values.anchoreConfig.policyBundles }} + - name: policy-bundle-volume + configMap: + name: {{ template "enterprise.fullname" . }}-policy-bundles + {{- end }} + {{- if and .Values.scratchVolume.fixGroupPermissions .Values.securityContext.fsGroup }} + initContainers: + {{- include "enterprise.common.fixPermissionsInitContainer" . | nindent 8 }} + {{- end }} + containers: + {{- if .Values.cloudsql.enabled }} + {{- include "enterprise.common.cloudsqlContainer" . 
| nindent 8 }} + {{- end }} + - name: "{{ .Chart.Name }}-{{ $component | lower }}" + image: {{ .Values.image }} + imagePullPolicy: {{ .Values.imagePullPolicy }} + {{- with .Values.containerSecurityContext }} + securityContext: + {{ toYaml . | nindent 12 }} + {{- end }} + command: ["/bin/sh", "-c"] + args: + - {{ print (include "enterprise.common.dockerEntrypoint" .) }} catalog + envFrom: {{- include "enterprise.common.envFrom" . | nindent 12 }} + env: {{- include "enterprise.common.environment" (merge (dict "component" $component) .) | nindent 12 }} + ports: + - name: {{ $component | lower }} + containerPort: {{ .Values.catalog.service.port }} + volumeMounts: {{- include "enterprise.common.volumeMounts" (merge (dict "component" $component) .) | nindent 12 }} + - name: anchore-scratch + mountPath: {{ .Values.scratchVolume.mountPath }} + {{- if .Values.anchoreConfig.policyBundles }} + {{- range $key, $value := .Values.anchoreConfig.policyBundles }} + - name: policy-bundle-volume + mountPath: {{ $.Values.anchoreConfig.service_dir }}/policies/{{ $key }} + subPath: {{ $key }} + {{- end }} + {{- end }} + livenessProbe: {{- include "enterprise.common.livenessProbe" (merge (dict "component" $component) .) | nindent 12 }} + readinessProbe: {{- include "enterprise.common.readinessProbe" (merge (dict "component" $component) .) | nindent 12 }} + {{- with .Values.catalog.resources }} + resources: {{- toYaml . | nindent 12 }} + {{- end }} + +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "enterprise.catalog.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "enterprise.common.labels" (merge (dict "component" $component) .) | nindent 4 }} + annotations: {{- include "enterprise.common.annotations" (merge (dict "component" $component) .) 
| nindent 4 }}
+spec:
+  type: {{ .Values.catalog.service.type }}
+  ports:
+    - name: {{ $component | lower }}
+      port: {{ .Values.catalog.service.port }}
+      targetPort: {{ .Values.catalog.service.port }}
+      protocol: TCP
+      {{ include "enterprise.service.nodePort" (merge (dict "component" $component) .) }}
+  selector:
+    app.kubernetes.io/name: {{ template "enterprise.fullname" . }}
+    app.kubernetes.io/component: {{ $component | lower }}
diff --git a/stable/enterprise/templates/envvars_configmap.yaml b/stable/enterprise/templates/envvars_configmap.yaml
new file mode 100644
index 00000000..43be8740
--- /dev/null
+++ b/stable/enterprise/templates/envvars_configmap.yaml
@@ -0,0 +1,112 @@
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: {{ .Release.Name }}-enterprise-config-env-vars
+  namespace: {{ .Release.Namespace }}
+  labels: {{- include "enterprise.common.labels" . | nindent 4 }}
+  annotations: {{- include "enterprise.common.annotations" . | nindent 4 }}
+
+data:
+  ANCHORE_ADMIN_EMAIL: "{{ .Values.anchoreConfig.default_admin_email }}"
+  ANCHORE_ALLOW_ECR_IAM_AUTO: "{{ .Values.anchoreConfig.allow_awsecr_iam_auto }}"
+  ANCHORE_ANALYZER_TASK_REQUEUE: "true"
+  ANCHORE_AUTH_ENABLE_HASHED_PASSWORDS: "{{ .Values.anchoreConfig.user_authentication.hashed_passwords }}"
+{{- /* the PRIVATE key file backs ANCHORE_AUTH_PRIVKEY — sources were swapped */}}
+{{- with .Values.anchoreConfig.keys.privateKeyFileName }}
+  ANCHORE_AUTH_PRIVKEY: "/home/anchore/certs/{{- . }}"
+{{- else }}
+  ANCHORE_AUTH_PRIVKEY: "null"
+{{- end }}
+{{- /* the PUBLIC key file backs ANCHORE_AUTH_PUBKEY */}}
+{{- with .Values.anchoreConfig.keys.publicKeyFileName }}
+  ANCHORE_AUTH_PUBKEY: "/home/anchore/certs/{{- . 
}}" +{{- else }} + ANCHORE_AUTH_PUBKEY: "null" +{{- end }} + ANCHORE_CATALOG_IMAGE_GC_WORKERS: "4" + ANCHORE_CLI_URL: "http://localhost:8228" + ANCHORE_CLI_USER: "admin" + ANCHORECTL_URL: "http://localhost:8228" + ANCHORECTL_USERNAME: "admin" + ANCHORE_DISABLE_METRICS_AUTH: "{{ .Values.anchoreConfig.metrics.auth_disabled }}" + ANCHORE_DB_POOL_MAX_OVERFLOW: "{{ .Values.anchoreConfig.database.db_pool_max_overflow }}" + ANCHORE_DB_POOL_SIZE: "{{ .Values.anchoreConfig.database.db_pool_size }}" + ANCHORE_DB_SSL: "{{ .Values.anchoreConfig.database.ssl }}" + ANCHORE_DB_SSL_MODE: "{{ .Values.anchoreConfig.database.sslMode }}" +{{- with .Values.anchoreConfig.database.sslRootCertFileName }} + ANCHORE_DB_SSL_ROOT_CERT: "/home/anchore/certs/{{- . }}" +{{- else }} + ANCHORE_DB_SSL_ROOT_CERT: "null" +{{- end }} + ANCHORE_DB_TIMEOUT: "{{ .Values.anchoreConfig.database.timeout }}" + ANCHORE_ENABLE_METRICS: "{{ .Values.anchoreConfig.metrics.enabled }}" + ANCHORE_ENTERPRISE_REPORTS_ASYNC_EXECUTION_TIMEOUT: "{{ .Values.anchoreConfig.reports.async_execution_timeout }}" + ANCHORE_ENTERPRISE_REPORTS_ENABLE_DATA_INGRESS: "{{ .Values.anchoreConfig.reports_worker.enable_data_ingress }}" + ANCHORE_ENTERPRISE_REPORTS_ENABLE_DATA_EGRESS: "{{ .Values.anchoreConfig.reports_worker.enable_data_egress }}" + ANCHORE_ENTERPRISE_REPORTS_DATA_EGRESS_WINDOW: "{{ .Values.anchoreConfig.reports_worker.data_egress_window }}" + ANCHORE_ENTERPRISE_REPORTS_DATA_REFRESH_MAX_WORKERS: "{{ .Values.anchoreConfig.reports_worker.data_refresh_max_workers }}" + ANCHORE_ENTERPRISE_REPORTS_DATA_LOAD_MAX_WORKERS: "{{ .Values.anchoreConfig.reports_worker.data_load_max_workers }}" + ANCHORE_ENTERPRISE_REPORTS_ENABLE_GRAPHIQL: "{{ .Values.anchoreConfig.reports.enable_graphiql }}" + ANCHORE_ENTERPRISE_REPORTS_MAX_ASYNC_EXECUTION_THREADS: "1" + ANCHORE_ENTERPRISE_REPORTS_VULNERABILITIES_BY_ECS_CONTAINER: "true" + ANCHORE_ENTERPRISE_REPORTS_VULNERABILITIES_BY_K8S_CONTAINER: "true" + 
ANCHORE_ENTERPRISE_REPORTS_VULNERABILITIES_BY_K8S_NAMESPACE: "true" +{{- if eq (toString .Values.anchoreConfig.catalog.runtime_inventory.image_ttl_days) "-1" }} + {{- fail "The Value `-1` is no longer valid for `.Values.anchoreConfig.catalog.runtime_inventory.image_ttl_days`. Please use `.Values.anchoreConfig.catalog.runtime_inventory.inventory_ingest_overwrite=true` to force runtime inventory to be overwritten upon every update for that reported context. `.Values.anchoreConfig.catalog.runtime_inventory.inventory_ttl_days` must be set to a value >1." -}} +{{- else }} + ANCHORE_ENTERPRISE_RUNTIME_INVENTORY_TTL_DAYS: "{{ .Values.anchoreConfig.catalog.runtime_inventory.inventory_ttl_days }}" + ANCHORE_ENTERPRISE_RUNTIME_INVENTORY_INGEST_OVERWRITE: "{{ .Values.anchoreConfig.catalog.runtime_inventory.inventory_ingest_overwrite }}" +{{- end }} +{{- with .Values.anchoreConfig.notifications.ui_url }} + ANCHORE_ENTERPRISE_UI_URL: "{{ . }}" +{{- else }} + ANCHORE_ENTERPRISE_UI_URL: {{ include "enterprise.ui.fullname" . 
| quote }} +{{- end }} + ANCHORE_FEEDS_DRIVER_GITHUB_ENABLED: {{ dig "anchoreConfig" "feeds" "drivers" "github" "enabled" "false" .Values.feeds | quote }} + ANCHORE_FEEDS_DRIVER_MSRC_ENABLED: {{ dig "anchoreConfig" "feeds" "drivers" "msrc" "enabled" "false" .Values.feeds | quote }} + ANCHORE_FEEDS_DRIVER_NVDV2_ENABLED: "true" + ANCHORE_FEEDS_DRIVER_PACKAGES_ENABLED: "false" + ANCHORE_FEEDS_SSL_VERIFY: "{{ .Values.anchoreConfig.internalServicesSSL.verifyCerts }}" + ANCHORE_FEEDS_VULNERABILITIES_ENABLED: "true" + ANCHORE_GLOBAL_CLIENT_CONNECT_TIMEOUT: "0" + ANCHORE_GLOBAL_CLIENT_READ_TIMEOUT: "0" + ANCHORE_GLOBAL_SERVER_REQUEST_TIMEOUT_SEC: "180" + ANCHORE_HINTS_ENABLED: "{{ .Values.anchoreConfig.analyzer.enable_hints }}" + ANCHORE_IMAGE_ANALYZE_TIMEOUT_SECONDS: "3600" + ANCHORE_IMPORT_OPERATION_EXPIRATION_DAYS: "7" + ANCHORE_INTERNAL_SSL_VERIFY: "{{ .Values.anchoreConfig.internalServicesSSL.verifyCerts }}" + ANCHORE_KEEP_IMAGE_ANALYSIS_TMPFILES: "false" + ANCHORE_LAYER_CACHE_ENABLED: {{ ternary "true" "false" (gt .Values.anchoreConfig.analyzer.layer_cache_max_gigabytes 0.0) | quote }} + ANCHORE_LAYER_CACHE_SIZE_GB: "{{ .Values.anchoreConfig.analyzer.layer_cache_max_gigabytes }}" + ANCHORE_LICENSE_FILE: "/home/anchore/license.yaml" + ANCHORE_LOG_LEVEL: "{{ .Values.anchoreConfig.log_level }}" + ANCHORE_MAX_COMPRESSED_IMAGE_SIZE_MB: "-1" + ANCHORE_MAX_IMPORT_CONTENT_SIZE_MB: "100" + ANCHORE_MAX_IMPORT_SOURCE_SIZE_MB: "100" + ANCHORE_MAX_REQUEST_THREADS: "50" + ANCHORE_OAUTH_ENABLED: "{{ .Values.anchoreConfig.user_authentication.oauth.enabled }}" + ANCHORE_OAUTH_TOKEN_EXPIRATION: "{{ .Values.anchoreConfig.user_authentication.oauth.default_token_expiration_seconds }}" + ANCHORE_OAUTH_REFRESH_TOKEN_EXPIRATION: "{{ .Values.anchoreConfig.user_authentication.oauth.refresh_token_expiration_seconds }}" + ANCHORE_OWNED_PACKAGE_FILTERING_ENABLED: "true" + ANCHORE_POLICY_ENGINE_ENABLE_PACKAGE_DB_LOAD: "true" + ANCHORE_POLICY_EVAL_CACHE_TTL_SECONDS: "3600" + ANCHORE_SAML_SECRET: 
"null" + ANCHORE_SERVICE_DIR: "{{ .Values.anchoreConfig.service_dir }}" + ANCHORE_SSL_ENABLED: "{{ .Values.anchoreConfig.internalServicesSSL.enabled }}" +{{- with .Values.anchoreConfig.internalServicesSSL.certSecretCertFileName }} + ANCHORE_SSL_CERT: "/home/anchore/certs/{{- . }}" +{{- else }} + ANCHORE_SSL_CERT: "null" +{{- end }} +{{- with .Values.anchoreConfig.internalServicesSSL.certSecretKeyFileName }} + ANCHORE_SSL_KEY: "/home/anchore/certs/{{- . }}" +{{- else }} + ANCHORE_SSL_KEY: "null" +{{- end }} + ANCHORE_SSO_REQUIRES_EXISTING_USERS: "{{ .Values.anchoreConfig.user_authentication.sso_require_existing_users }}" + ANCHORE_TMP_DIR: "{{ .Values.scratchVolume.mountPath }}" + ANCHORE_VULN_MATCHING_ECOSYSTEM_SPECIFIC_DOTNET_SEARCH_BY_CPE_ENABLED: "true" + ANCHORE_VULN_MATCHING_ECOSYSTEM_SPECIFIC_GOLANG_SEARCH_BY_CPE_ENABLED: "true" + ANCHORE_VULN_MATCHING_ECOSYSTEM_SPECIFIC_JAVA_SEARCH_BY_CPE_ENABLED: "true" + ANCHORE_VULN_MATCHING_ECOSYSTEM_SPECIFIC_JAVASCRIPT_SEARCH_BY_CPE_ENABLED: "false" + ANCHORE_VULN_MATCHING_ECOSYSTEM_SPECIFIC_PYTHON_SEARCH_BY_CPE_ENABLED: "true" + ANCHORE_VULN_MATCHING_ECOSYSTEM_SPECIFIC_RUBY_SEARCH_BY_CPE_ENABLED: "true" + ANCHORE_VULN_MATCHING_ECOSYSTEM_SPECIFIC_STOCK_SEARCH_BY_CPE_ENABLED: "true" diff --git a/stable/enterprise/templates/hooks/post-upgrade/upgrade_job.yaml b/stable/enterprise/templates/hooks/post-upgrade/upgrade_job.yaml new file mode 100644 index 00000000..219c990d --- /dev/null +++ b/stable/enterprise/templates/hooks/post-upgrade/upgrade_job.yaml @@ -0,0 +1,71 @@ +{{- if and .Values.upgradeJob.enabled .Values.upgradeJob.usePostUpgradeHook -}} +{{- $component := "upgradeJob" -}} + +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "enterprise.upgradeJob.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "enterprise.common.labels" (merge (dict "component" $component) .) 
| nindent 4 }} + annotations: {{- include "enterprise.common.annotations" (merge (dict "component" $component "nil" true) .) | nindent 4 }} + {{- if not .Values.upgradeJob.force }} + "helm.sh/hook": post-upgrade + "helm.sh/hook-weight": "0" + {{- end }} +spec: + template: + metadata: + name: {{ template "enterprise.upgradeJob.fullname" . }} + labels: {{- include "enterprise.common.labels" (merge (dict "component" $component) .) | nindent 8 }} + annotations: {{- include "enterprise.common.annotations" (merge (dict "component" $component "nil" true) .) | nindent 8 }} + {{- if and (not .Values.injectSecretsViaEnv) (not .Values.useExistingSecrets) }} + checksum/secrets: {{ include (print $.Template.BasePath "/anchore_secret.yaml") . | sha256sum }} + {{- end }} + spec: + {{- include "enterprise.common.podSpec" (merge (dict "component" $component) .) | indent 6 }} + restartPolicy: Never + volumes: {{- include "enterprise.common.extraVolumes" (merge (dict "component" $component) .) | nindent 8 }} + {{- with .Values.certStoreSecretName }} + - name: certs + secret: + secretName: {{ . }} + {{- end }} + {{- if .Values.cloudsql.useExistingServiceAcc }} + - name: {{ .Values.cloudsql.serviceAccSecretName }} + secret: + secretName: {{ .Values.cloudsql.serviceAccSecretName }} + {{- end }} + containers: + {{- if .Values.cloudsql.enabled }} + {{- include "enterprise.common.cloudsqlContainer" . | nindent 8 }} + {{- end }} + - name: anchore-upgrade + image: {{ .Values.image }} + imagePullPolicy: {{ .Values.imagePullPolicy }} + {{- with .Values.containerSecurityContext }} + securityContext: {{- toYaml . | nindent 12 }} + {{- end }} + command: ["/bin/bash", "-c"] + args: + {{- if not .Values.anchoreConfig.database.ssl }} + - | + {{ print (include "enterprise.doSourceFile" .) 
}} anchore-enterprise-manager db --db-connect postgresql://"${ANCHORE_DB_USER}":"${ANCHORE_DB_PASSWORD}"@"${ANCHORE_DB_HOST}":"${ANCHORE_DB_PORT}"/"${ANCHORE_DB_NAME}" upgrade --dontask; + {{- else if eq .Values.anchoreConfig.database.sslMode "require" }} + - | + {{ print (include "enterprise.doSourceFile" .) }} anchore-enterprise-manager db --db-use-ssl --db-connect postgresql://"${ANCHORE_DB_USER}":"${ANCHORE_DB_PASSWORD}"@"${ANCHORE_DB_HOST}":"${ANCHORE_DB_PORT}"/"${ANCHORE_DB_NAME}"?sslmode={{- .Values.anchoreConfig.database.sslMode }} upgrade --dontask; + {{- else }} + - | + {{ print (include "enterprise.doSourceFile" .) }} anchore-enterprise-manager db --db-use-ssl --db-connect postgresql://"${ANCHORE_DB_USER}":"${ANCHORE_DB_PASSWORD}"@"${ANCHORE_DB_HOST}":"${ANCHORE_DB_PORT}"/"${ANCHORE_DB_NAME}"?sslmode={{- .Values.anchoreConfig.database.sslMode -}}\&sslrootcert=/home/anchore/certs/{{- .Values.anchoreConfig.database.sslRootCertFileName }} upgrade --dontask; + {{- end }} + envFrom: {{- include "enterprise.common.envFrom" . | nindent 12 }} + env: {{- include "enterprise.common.environment" (merge (dict "component" $component) .) | nindent 12 }} + volumeMounts: {{- include "enterprise.common.extraVolumeMounts" (merge (dict "component" $component) .) | nindent 12 }} + {{- if .Values.certStoreSecretName }} + - name: certs + mountPath: /home/anchore/certs/ + readOnly: true + {{- end }} + {{- with .Values.upgradeJob.resources }} + resources: {{- toYaml . 
| nindent 12 }} + {{- end }} +{{- end -}} diff --git a/stable/enterprise/templates/hooks/pre-upgrade/upgrade_job.yaml b/stable/enterprise/templates/hooks/pre-upgrade/upgrade_job.yaml new file mode 100644 index 00000000..0497c5d8 --- /dev/null +++ b/stable/enterprise/templates/hooks/pre-upgrade/upgrade_job.yaml @@ -0,0 +1,120 @@ +{{- if and .Values.upgradeJob.enabled (not .Values.upgradeJob.usePostUpgradeHook) -}} +{{- $component := "upgradeJob" -}} + +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "enterprise.upgradeJob.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "enterprise.common.labels" (merge (dict "component" $component) .) | nindent 4 }} + annotations: {{- include "enterprise.common.annotations" (merge (dict "component" $component "nil" true) .) | nindent 4 }} + {{- if not .Values.upgradeJob.force }} + "helm.sh/hook": pre-upgrade + "helm.sh/hook-weight": "3" + "helm.sh/hook-delete-policy": before-hook-creation + {{- end }} +spec: + {{- if ne (.Values.upgradeJob.ttlSecondsAfterFinished | quote) (-1 | quote) }} + ttlSecondsAfterFinished: {{ .Values.upgradeJob.ttlSecondsAfterFinished }} + {{- end }} + template: + metadata: + name: {{ template "enterprise.upgradeJob.fullname" . }} + labels: {{- include "enterprise.common.labels" (merge (dict "component" $component) .) | nindent 8 }} + annotations: {{- include "enterprise.common.annotations" (merge (dict "component" $component "nil" true) .) | nindent 8 }} + {{- if and (not .Values.injectSecretsViaEnv) (not .Values.useExistingSecrets) }} + checksum/secrets: {{ include (print $.Template.BasePath "/anchore_secret.yaml") . | sha256sum }} + {{- end }} + spec: + {{- include "enterprise.common.podSpec" (merge (dict "component" $component) .) | indent 6 }} + restartPolicy: Never + volumes: {{- include "enterprise.common.extraVolumes" (merge (dict "component" $component) .) | nindent 8 }} + {{- with .Values.certStoreSecretName }} + - name: certs + secret: + secretName: {{ . 
}} + {{- end }} + {{- if .Values.cloudsql.useExistingServiceAcc }} + - name: {{ .Values.cloudsql.serviceAccSecretName }} + secret: + secretName: {{ .Values.cloudsql.serviceAccSecretName }} + {{- end }} + initContainers: + - name: scale-down-anchore + image: {{ .Values.upgradeJob.kubectlImage }} + command: ["/bin/bash", "-c"] + args: + - | + kubectl scale deployments --all --replicas=0 -l app.kubernetes.io/name={{ template "enterprise.fullname" . }}; + while [[ $(kubectl get pods -l app.kubernetes.io/name={{ template "enterprise.fullname" . }} --field-selector=status.phase=Running --no-headers | tee /dev/stderr | wc -l) -gt 0 ]]; do + echo 'waiting for pods to go down...' && sleep 5; + done + {{- with .Values.containerSecurityContext }} + securityContext: {{ toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.upgradeJob.resources }} + resources: {{- toYaml . | nindent 12 }} + {{- end }} + - name: wait-for-db + image: {{ .Values.image }} + imagePullPolicy: {{ .Values.imagePullPolicy }} + env: {{- include "enterprise.common.environment" (merge (dict "component" $component) .) | nindent 12 }} + command: ["/bin/bash", "-c"] + args: + - | + while true; do + CONNSTR=postgresql://"${ANCHORE_DB_USER}":"${ANCHORE_DB_PASSWORD}"@"${ANCHORE_DB_HOST}":"${ANCHORE_DB_PORT}"/"${ANCHORE_DB_NAME}" + if [[ ${ANCHORE_DB_SSL_MODE} != null ]]; then + CONNSTR=${CONNSTR}?sslmode=${ANCHORE_DB_SSL_MODE} + fi + if [[ ${ANCHORE_DB_SSL_ROOT_CERT} != null ]]; then + CONNSTR=${CONNSTR}\&sslrootcert=${ANCHORE_DB_SSL_ROOT_CERT} + fi + err=$(anchore-enterprise-manager db --db-connect ${CONNSTR} pre-upgrade-check 2>&1 > /dev/null) + if [[ !$err ]]; then + echo "Database is ready" + exit 0 + fi + echo "Database is not ready yet, sleeping 10 seconds..." + sleep 10 + done + {{- with .Values.containerSecurityContext }} + securityContext: {{ toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.upgradeJob.resources }} + resources: {{- toYaml . 
| nindent 12 }} + {{- end }} + containers: + {{- if .Values.cloudsql.enabled }} + {{- include "enterprise.common.cloudsqlContainer" . | nindent 8 }} + {{- end }} + - name: upgrade-enterprise-db + image: {{ .Values.image }} + imagePullPolicy: {{ .Values.imagePullPolicy }} + {{- with .Values.containerSecurityContext }} + securityContext: {{ toYaml . | nindent 12 }} + {{- end }} + envFrom: {{- include "enterprise.common.envFrom" . | nindent 12 }} + env: {{- include "enterprise.common.environment" (merge (dict "component" $component) .) | nindent 12 }} + volumeMounts: {{- include "enterprise.common.extraVolumeMounts" (merge (dict "component" $component) .) | nindent 12 }} + {{- if .Values.certStoreSecretName }} + - name: certs + mountPath: /home/anchore/certs/ + readOnly: true + {{- end }} + {{- with .Values.upgradeJob.resources }} + resources: {{- toYaml . | nindent 12 }} + {{- end }} + command: ["/bin/bash", "-c"] + args: + {{- if not .Values.anchoreConfig.database.ssl }} + - | + {{ print (include "enterprise.doSourceFile" .) }} anchore-enterprise-manager db --db-connect postgresql://"${ANCHORE_DB_USER}":"${ANCHORE_DB_PASSWORD}"@"${ANCHORE_DB_HOST}":"${ANCHORE_DB_PORT}"/"${ANCHORE_DB_NAME}" upgrade --dontask; + {{- else if eq .Values.anchoreConfig.database.sslMode "require"}} + - | + {{ print (include "enterprise.doSourceFile" .) }} anchore-enterprise-manager db --db-use-ssl --db-connect postgresql://"${ANCHORE_DB_USER}":"${ANCHORE_DB_PASSWORD}"@"${ANCHORE_DB_HOST}":"${ANCHORE_DB_PORT}"/"${ANCHORE_DB_NAME}"?sslmode={{- .Values.anchoreConfig.database.sslMode }} upgrade --dontask; + {{- else }} + - | + {{ print (include "enterprise.doSourceFile" .) 
}} anchore-enterprise-manager db --db-use-ssl --db-connect postgresql://"${ANCHORE_DB_USER}":"${ANCHORE_DB_PASSWORD}"@"${ANCHORE_DB_HOST}":"${ANCHORE_DB_PORT}"/"${ANCHORE_DB_NAME}"?sslmode={{- .Values.anchoreConfig.database.sslMode -}}\&sslrootcert=/home/anchore/certs/{{- .Values.anchoreConfig.database.sslRootCertFileName }} upgrade --dontask; + {{- end }} +{{- end -}} diff --git a/stable/enterprise/templates/hooks/pre-upgrade/upgrade_rbac.yaml b/stable/enterprise/templates/hooks/pre-upgrade/upgrade_rbac.yaml new file mode 100644 index 00000000..515f4f9e --- /dev/null +++ b/stable/enterprise/templates/hooks/pre-upgrade/upgrade_rbac.yaml @@ -0,0 +1,76 @@ +{{- if and .Values.upgradeJob.enabled .Values.upgradeJob.rbacCreate (not .Values.upgradeJob.usePostUpgradeHook) -}} +{{- $component := "upgradeJob" -}} + +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "enterprise.fullname" . }}-upgrade-sa + namespace: {{ .Release.Namespace }} + labels: {{- include "enterprise.common.labels" (merge (dict "component" $component) .) | nindent 4 }} + annotations: {{- include "enterprise.common.annotations" (merge (dict "component" $component "nil" true) .) | nindent 4 }} + {{- if not .Values.upgradeJob.force }} + "helm.sh/hook": pre-upgrade + "helm.sh/hook-weight": "0" + {{- end }} + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "enterprise.fullname" . }}-upgrade-role-binding + namespace: {{ .Release.Namespace }} + labels: {{- include "enterprise.common.labels" (merge (dict "component" $component) .) | nindent 4 }} + annotations: {{- include "enterprise.common.annotations" (merge (dict "component" $component "nil" true) .) | nindent 4 }} + {{- if not .Values.upgradeJob.force }} + "helm.sh/hook": pre-upgrade + "helm.sh/hook-weight": "0" + {{- end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "enterprise.fullname" . 
}}-upgrade-role +subjects: + - kind: ServiceAccount + name: {{ template "enterprise.fullname" . }}-upgrade-sa + namespace: {{ .Release.Namespace }} + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "enterprise.fullname" . }}-upgrade-role + namespace: {{ .Release.Namespace }} + labels: {{- include "enterprise.common.labels" (merge (dict "component" $component) .) | nindent 4 }} + annotations: {{- include "enterprise.common.annotations" (merge (dict "component" $component "nil" true) .) | nindent 4 }} + {{- if not .Values.upgradeJob.force }} + "helm.sh/hook": pre-upgrade + "helm.sh/hook-weight": "0" + {{- end }} +rules: + - apiGroups: + - extensions + - apps + resources: + - deployments + verbs: + - get + - list + - watch + - update + - patch + - apiGroups: + - apps + resources: + - deployments/scale + verbs: + - patch + - apiGroups: + - "" + resources: + - pods + verbs: + - watch + - list + - get +{{- end -}} diff --git a/stable/enterprise/templates/ingress.yaml b/stable/enterprise/templates/ingress.yaml new file mode 100644 index 00000000..6caacdda --- /dev/null +++ b/stable/enterprise/templates/ingress.yaml @@ -0,0 +1,140 @@ +{{- if .Values.ingress.enabled -}} +{{- $component := "ingress" -}} + +{{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" }} +apiVersion: networking.k8s.io/v1 +{{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1/Ingress" }} +apiVersion: networking.k8s.io/v1beta1 +{{- else }} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ template "enterprise.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "enterprise.common.labels" (merge (dict "component" $component) .) | nindent 4 }} + annotations: {{- include "enterprise.common.annotations" (merge (dict "component" $component) .) 
| nindent 4 }} +spec: +{{- if and .Values.ingress.ingressClassName (not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class")) }} + ingressClassName: {{ .Values.ingress.ingressClassName }} +{{- end }} +{{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- end }} + rules: + {{- if or .Values.ingress.apiHosts .Values.ingress.uiHosts .Values.ingress.feedsHosts }} + {{- range $apiHostIndex, $apiHostName := .Values.ingress.apiHosts }} + - host: {{ $apiHostName | quote }} + http: + paths: + {{- range $apiPathIndex, $apiPath := $.Values.ingress.apiPaths }} + - path: {{ $apiPath }} + {{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" }} + pathType: Prefix + backend: + service: + name: {{ template "enterprise.api.fullname" $ }} + port: + number: {{ $.Values.api.service.port }} + {{- else }} + backend: + serviceName: {{ template "enterprise.api.fullname" $ }} + servicePort: {{ $.Values.api.service.port }} + {{- end }} + {{- end }} + {{- end }} + {{- range $uiHostIndex, $uiHostName := .Values.ingress.uiHosts }} + - host: {{ $uiHostName | quote }} + http: + paths: + - path: {{ $.Values.ingress.uiPath }} + {{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" }} + pathType: Prefix + backend: + service: + name: {{ template "enterprise.ui.fullname" $ }} + port: + number: {{ $.Values.ui.service.port }} + {{- else }} + backend: + serviceName: {{ template "enterprise.ui.fullname" $ }} + servicePort: {{ $.Values.ui.service.port }} + {{- end }} + {{- end }} + {{- range $feedsHostIndex, $feedsHostName := .Values.ingress.feedsHosts }} + - host: {{ $feedsHostName | quote }} + http: + paths: + {{- range $feedsPathIndex, $feedsPath := $.Values.ingress.feedsPaths }} + - path: {{ $feedsPath }} + {{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" }} + pathType: Prefix + backend: + service: + name: 
{{ template "enterprise.feeds.fullname" $ }} + port: + number: {{ $.Values.feeds.service.port }} + {{- else }} + backend: + serviceName: {{ template "enterprise.feeds.fullname" $ }} + servicePort: {{ $.Values.feeds.service.port }} + {{- end }} + {{- end }} + {{- end }} + {{- else }} + - http: + paths: + {{- range .Values.ingress.apiPaths }} + - path: {{ . }} + {{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" }} + pathType: Prefix + backend: + service: + name: {{ template "enterprise.api.fullname" $ }} + port: + number: {{ $.Values.api.service.port }} + {{- else }} + backend: + serviceName: {{ template "enterprise.api.fullname" $ }} + servicePort: {{ $.Values.api.service.port }} + {{- end }} + {{- end }} + {{- with .Values.ingress.uiPath }} + - path: {{ . }} + {{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" }} + pathType: Prefix + backend: + service: + name: {{ template "enterprise.ui.fullname" $ }} + port: + number: {{ $.Values.ui.service.port }} + {{- else }} + backend: + serviceName: {{ template "enterprise.ui.fullname" $ }} + servicePort: {{ $.Values.ui.service.port }} + {{- end }} + {{- end }} + {{- range .Values.ingress.feedsPaths }} + - path: {{ . 
}} + {{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" }} + pathType: Prefix + backend: + service: + name: {{ template "enterprise.feeds.fullname" $ }} + port: + number: {{ $.Values.feeds.service.port }} + {{- else }} + backend: + serviceName: {{ template "enterprise.feeds.fullname" $ }} + servicePort: {{ $.Values.feeds.service.port }} + {{- end }} + {{- end }} + {{- end }} +{{- end -}} diff --git a/stable/enterprise/templates/migrate_pod.yaml b/stable/enterprise/templates/migrate_pod.yaml new file mode 100644 index 00000000..ff180b0f --- /dev/null +++ b/stable/enterprise/templates/migrate_pod.yaml @@ -0,0 +1,46 @@ +{{- $component := "migrate" -}} +{{- if .Values.startMigrationPod -}} +apiVersion: v1 +kind: Pod +metadata: + name: {{ .Release.Name }}-enterprise-migrate-db + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/component: upgradejob +spec: + containers: + - command: [ "/bin/bash", "-c", "--" ] + args: [ "sleep infinity" ] + image: {{ .Values.migrationPodImage }} + imagePullPolicy: {{ .Values.imagePullPolicy }} + name: enterprise-migrate-db + env: + - name: NEW_DB_HOST + value: {{ .Release.Name }}-postgresql + - name: OLD_DB_HOST + valueFrom: + secretKeyRef: + key: ANCHORE_DB_HOST + name: {{ .Values.migrationAnchoreEngineSecretName }} + - name: NEW_DB_PASSWORD + value: {{ .Values.postgresql.auth.password }} + - name: OLD_DB_PASSWORD + valueFrom: + secretKeyRef: + key: ANCHORE_DB_PASSWORD + name: {{ .Values.migrationAnchoreEngineSecretName }} + - name: NEW_DB_USERNAME + value: {{ .Values.postgresql.auth.username }} + - name: OLD_DB_USERNAME + valueFrom: + secretKeyRef: + key: ANCHORE_DB_USER + name: {{ .Values.migrationAnchoreEngineSecretName }} + - name: OLD_DB_NAME + valueFrom: + secretKeyRef: + key: ANCHORE_DB_NAME + name: {{ .Values.migrationAnchoreEngineSecretName }} + - name: NEW_DB_NAME + value: {{ .Values.postgresql.auth.database }} +{{- end -}} diff --git 
a/stable/enterprise/templates/notifications_deployment.yaml b/stable/enterprise/templates/notifications_deployment.yaml new file mode 100644 index 00000000..aa9e3fe5 --- /dev/null +++ b/stable/enterprise/templates/notifications_deployment.yaml @@ -0,0 +1,71 @@ +{{- $component := "notifications" -}} + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "enterprise.notifications.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "enterprise.common.labels" (merge (dict "component" $component) .) | nindent 4 }} + annotations: {{- include "enterprise.common.annotations" (merge (dict "component" $component) .) | nindent 4 }} +spec: + selector: + matchLabels: {{- include "enterprise.common.matchLabels" (merge (dict "component" $component) .) | nindent 6 }} + replicas: {{ .Values.notifications.replicaCount }} + strategy: + type: Recreate + template: + metadata: + labels: {{- include "enterprise.common.labels" (merge (dict "component" $component) .) | nindent 8 }} + annotations: {{- include "enterprise.common.annotations" (merge (dict "component" $component "nil" true) .) | nindent 8 }} + {{- if and (not .Values.injectSecretsViaEnv) (not .Values.useExistingSecrets) }} + checksum/secrets: {{ include (print $.Template.BasePath "/anchore_secret.yaml") . | sha256sum }} + {{- end }} + checksum/enterprise-config: {{ include (print $.Template.BasePath "/anchore_configmap.yaml") . | sha256sum }} + spec: + {{- include "enterprise.common.podSpec" (merge (dict "component" $component) .) | indent 6 }} + volumes: {{- include "enterprise.common.volumes" (merge (dict "component" $component) .) | nindent 8 }} + containers: + {{- if .Values.cloudsql.enabled }} + {{- include "enterprise.common.cloudsqlContainer" . | nindent 8 }} + {{- end }} + - name: "{{ .Chart.Name }}-{{ $component | lower }}" + image: {{ .Values.image }} + imagePullPolicy: {{ .Values.imagePullPolicy }} + {{- with .Values.containerSecurityContext }} + securityContext: + {{ toYaml . 
| nindent 12 }} + {{- end }} + command: ["/bin/sh", "-c"] + args: + - {{ print (include "enterprise.common.dockerEntrypoint" .) }} notifications + ports: + - containerPort: {{ .Values.notifications.service.port }} + name: {{ $component | lower }} + envFrom: {{- include "enterprise.common.envFrom" . | nindent 12 }} + env: {{- include "enterprise.common.environment" (merge (dict "component" $component) .) | nindent 12 }} + volumeMounts: {{- include "enterprise.common.volumeMounts" (merge (dict "component" $component) .) | nindent 12 }} + livenessProbe: {{- include "enterprise.common.livenessProbe" (merge (dict "component" $component) .) | nindent 12 }} + readinessProbe: {{- include "enterprise.common.readinessProbe" (merge (dict "component" $component) .) | nindent 12 }} + {{- with .Values.notifications.resources }} + resources: {{- toYaml . | nindent 12 }} + {{- end }} + +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "enterprise.notifications.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "enterprise.common.labels" (merge (dict "component" $component) .) | nindent 4 }} + annotations: {{- include "enterprise.common.annotations" (merge (dict "component" $component) .) | nindent 4 }} +spec: + type: {{ .Values.notifications.service.type }} + ports: + - name: {{ $component | lower }} + port: {{ .Values.notifications.service.port }} + targetPort: {{ .Values.notifications.service.port }} + protocol: TCP + {{ include "enterprise.service.nodePort" (merge (dict "component" $component) .) }} + selector: + app.kubernetes.io/name: {{ template "enterprise.fullname" . 
}} + app.kubernetes.io/component: {{ $component | lower }} diff --git a/stable/enterprise/templates/policybundle_configmap.yaml b/stable/enterprise/templates/policybundle_configmap.yaml new file mode 100644 index 00000000..a03a3417 --- /dev/null +++ b/stable/enterprise/templates/policybundle_configmap.yaml @@ -0,0 +1,14 @@ +{{- if .Values.anchoreConfig.policyBundles -}} +{{- $component := "policyEngine" -}} +kind: ConfigMap +apiVersion: v1 +metadata: + name: {{ template "enterprise.fullname" . }}-policy-bundles + namespace: {{ .Release.Namespace }} + labels: {{- include "enterprise.common.labels" (merge (dict "component" $component) .) | nindent 4 }} + annotations: {{- include "enterprise.common.annotations" (merge (dict "component" $component) .) | nindent 4 }} +data: +{{- with .Values.anchoreConfig.policyBundles }} + {{- toYaml . | nindent 2 }} +{{- end }} +{{- end -}} diff --git a/stable/enterprise/templates/policyengine_deployment.yaml b/stable/enterprise/templates/policyengine_deployment.yaml new file mode 100644 index 00000000..4bed6fbe --- /dev/null +++ b/stable/enterprise/templates/policyengine_deployment.yaml @@ -0,0 +1,79 @@ +{{- $component := "policyEngine" -}} + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "enterprise.policyEngine.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "enterprise.common.labels" (merge (dict "component" $component) .) | nindent 4 }} + annotations: {{- include "enterprise.common.annotations" (merge (dict "component" $component) .) | nindent 4 }} +spec: + selector: + matchLabels: {{- include "enterprise.common.matchLabels" (merge (dict "component" $component) .) | nindent 6 }} + replicas: {{ .Values.policyEngine.replicaCount }} + strategy: + type: Recreate + template: + metadata: + labels: {{- include "enterprise.common.labels" (merge (dict "component" $component) .) 
| nindent 8 }} + annotations: {{- include "enterprise.common.annotations" (merge (dict "component" $component "nil" true) .) | nindent 8 }} + {{- if and (not .Values.injectSecretsViaEnv) (not .Values.useExistingSecrets) }} + checksum/secrets: {{ include (print $.Template.BasePath "/anchore_secret.yaml") . | sha256sum }} + {{- end }} + checksum/enterprise-config: {{ include (print $.Template.BasePath "/anchore_configmap.yaml") . | sha256sum }} + spec: + {{- include "enterprise.common.podSpec" (merge (dict "component" $component) .) | indent 6 }} + volumes: {{- include "enterprise.common.volumes" (merge (dict "component" $component) .) | nindent 8 }} + - name: anchore-scratch + {{- include "enterprise.common.scratchVolume.details" (merge (dict "component" $component) .) | nindent 10 }} + {{- if and .Values.scratchVolume.fixGroupPermissions .Values.securityContext.fsGroup }} + initContainers: + {{- include "enterprise.common.fixPermissionsInitContainer" . | nindent 8 }} + {{- end }} + containers: + {{- if .Values.cloudsql.enabled }} + {{- include "enterprise.common.cloudsqlContainer" . | nindent 8 }} + {{- end }} + - name: "{{ .Chart.Name }}-{{ $component | lower }}" + image: {{ .Values.image }} + imagePullPolicy: {{ .Values.imagePullPolicy }} + {{- with .Values.containerSecurityContext }} + securityContext: + {{ toYaml . | nindent 12 }} + {{- end }} + command: ["/bin/sh", "-c"] + args: + - {{ print (include "enterprise.common.dockerEntrypoint" .) }} policy_engine + envFrom: {{- include "enterprise.common.envFrom" . | nindent 12 }} + env: {{- include "enterprise.common.environment" (merge (dict "component" $component) .) | nindent 12 }} + ports: + - name: {{ $component | lower }} + containerPort: {{ .Values.policyEngine.service.port }} + volumeMounts: {{- include "enterprise.common.volumeMounts" (merge (dict "component" $component) .) 
| nindent 12 }} + - name: "anchore-scratch" + mountPath: {{ .Values.scratchVolume.mountPath }} + livenessProbe: {{- include "enterprise.common.livenessProbe" (merge (dict "component" $component) .) | nindent 12 }} + readinessProbe: {{- include "enterprise.common.readinessProbe" (merge (dict "component" $component) .) | nindent 12 }} + {{- with .Values.policyEngine.resources }} + resources: {{- toYaml . | nindent 12 }} + {{- end }} + +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "enterprise.policyEngine.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "enterprise.common.labels" (merge (dict "component" $component) .) | nindent 4 }} + annotations: {{- include "enterprise.common.annotations" (merge (dict "component" $component) .) | nindent 4 }} +spec: + type: {{ .Values.policyEngine.service.type }} + ports: + - name: {{ $component | lower }} + port: {{ .Values.policyEngine.service.port }} + targetPort: {{ .Values.policyEngine.service.port }} + protocol: TCP + {{ include "enterprise.service.nodePort" (merge (dict "component" $component) .) }} + selector: + app.kubernetes.io/name: {{ template "enterprise.fullname" . }} + app.kubernetes.io/component: {{ $component | lower }} diff --git a/stable/enterprise/templates/reports_deployment.yaml b/stable/enterprise/templates/reports_deployment.yaml new file mode 100644 index 00000000..5d41532f --- /dev/null +++ b/stable/enterprise/templates/reports_deployment.yaml @@ -0,0 +1,83 @@ +{{- $component := "reports" -}} + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "enterprise.reports.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "enterprise.common.labels" (merge (dict "component" $component) .) | nindent 4 }} + annotations: {{- include "enterprise.common.annotations" (merge (dict "component" $component) .) 
| nindent 4 }} +spec: + selector: + matchLabels: {{- include "enterprise.common.matchLabels" (merge (dict "component" $component) .) | nindent 6 }} + replicas: {{ .Values.reports.replicaCount }} + strategy: + type: Recreate + template: + metadata: + labels: {{- include "enterprise.common.labels" (merge (dict "component" $component) .) | nindent 8 }} + annotations: {{- include "enterprise.common.annotations" (merge (dict "component" $component "nil" true) .) | nindent 8 }} + {{- if and (not .Values.injectSecretsViaEnv) (not .Values.useExistingSecrets) }} + checksum/secrets: {{ include (print $.Template.BasePath "/anchore_secret.yaml") . | sha256sum }} + {{- end }} + checksum/enterprise-config: {{ include (print $.Template.BasePath "/anchore_configmap.yaml") . | sha256sum }} + spec: + {{- include "enterprise.common.podSpec" (merge (dict "component" $component) .) | indent 6 }} + volumes: {{- include "enterprise.common.volumes" (merge (dict "component" $component) .) | nindent 8 }} + {{- if .Values.anchoreConfig.reports.use_volume }} + - name: "anchore-scratch" + {{- include "enterprise.common.scratchVolume.details" (merge (dict "component" $component) .) | nindent 10 }} + {{- end }} + {{- if and .Values.anchoreConfig.reports.use_volume .Values.scratchVolume.fixGroupPermissions .Values.securityContext.fsGroup }} + initContainers: + {{- include "enterprise.common.fixPermissionsInitContainer" . | nindent 8 }} + {{- end }} + containers: + {{- if .Values.cloudsql.enabled }} + {{- include "enterprise.common.cloudsqlContainer" . | nindent 8 }} + {{- end }} + - name: "{{ .Chart.Name }}-{{ $component | lower }}" + image: {{ .Values.image }} + imagePullPolicy: {{ .Values.imagePullPolicy }} + {{- with .Values.containerSecurityContext }} + securityContext: + {{ toYaml . | nindent 12 }} + {{- end }} + command: ["/bin/sh", "-c"] + args: + - {{ print (include "enterprise.common.dockerEntrypoint" .) 
}} reports + ports: + - containerPort: {{ .Values.reports.service.port }} + name: {{ $component | lower }} + envFrom: {{- include "enterprise.common.envFrom" . | nindent 12 }} + env: {{- include "enterprise.common.environment" (merge (dict "component" $component) .) | nindent 12 }} + volumeMounts: {{- include "enterprise.common.volumeMounts" (merge (dict "component" $component) .) | nindent 12 }} + {{- if .Values.anchoreConfig.reports.use_volume }} + - name: "anchore-scratch" + mountPath: {{ .Values.scratchVolume.mountPath }} + {{- end }} + livenessProbe: {{- include "enterprise.common.livenessProbe" (merge (dict "component" $component) .) | nindent 12 }} + readinessProbe: {{- include "enterprise.common.readinessProbe" (merge (dict "component" $component) .) | nindent 12 }} + {{- with .Values.reports.resources }} + resources: {{- toYaml . | nindent 12 }} + {{- end }} + +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "enterprise.reports.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "enterprise.common.labels" (merge (dict "component" $component) .) | nindent 4 }} + annotations: {{- include "enterprise.common.annotations" (merge (dict "component" $component) .) | nindent 4 }} +spec: + type: {{ .Values.reports.service.type }} + ports: + - name: {{ $component | lower }} + port: {{ .Values.reports.service.port }} + targetPort: {{ .Values.reports.service.port }} + protocol: TCP + {{ include "enterprise.service.nodePort" (merge (dict "component" $component) .) }} + selector: + app.kubernetes.io/name: {{ template "enterprise.fullname" . 
}} + app.kubernetes.io/component: {{ $component | lower }} diff --git a/stable/enterprise/templates/reportsworker_deployment.yaml b/stable/enterprise/templates/reportsworker_deployment.yaml new file mode 100644 index 00000000..9ffde13e --- /dev/null +++ b/stable/enterprise/templates/reportsworker_deployment.yaml @@ -0,0 +1,71 @@ +{{- $component := "reportsWorker" -}} + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "enterprise.reportsWorker.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "enterprise.common.labels" (merge (dict "component" $component) .) | nindent 4 }} + annotations: {{- include "enterprise.common.annotations" (merge (dict "component" $component) .) | nindent 4 }} +spec: + selector: + matchLabels: {{- include "enterprise.common.matchLabels" (merge (dict "component" $component) .) | nindent 6 }} + replicas: {{ .Values.reportsWorker.replicaCount }} + strategy: + type: Recreate + template: + metadata: + labels: {{- include "enterprise.common.labels" (merge (dict "component" $component) .) | nindent 8 }} + annotations: {{- include "enterprise.common.annotations" (merge (dict "component" $component "nil" true) .) | nindent 8 }} + {{- if and (not .Values.injectSecretsViaEnv) (not .Values.useExistingSecrets) }} + checksum/secrets: {{ include (print $.Template.BasePath "/anchore_secret.yaml") . | sha256sum }} + {{- end }} + checksum/enterprise-config: {{ tpl (print $.Files.BasePath "/default_config.yaml") . | sha256sum }} + spec: + {{- include "enterprise.common.podSpec" (merge (dict "component" $component) .) | indent 6 }} + volumes: {{- include "enterprise.common.volumes" (merge (dict "component" $component) .) | nindent 8 }} + containers: + {{- if .Values.cloudsql.enabled }} + {{- include "enterprise.common.cloudsqlContainer" . 
| nindent 8 }} + {{- end }} + - name: "{{ .Chart.Name }}-{{ $component | lower }}" + image: {{ .Values.image }} + imagePullPolicy: {{ .Values.imagePullPolicy }} + {{- with .Values.containerSecurityContext }} + securityContext: + {{ toYaml . | nindent 12 }} + {{- end }} + command: ["/bin/sh", "-c"] + args: + - {{ print (include "enterprise.common.dockerEntrypoint" .) }} reports_worker + ports: + - containerPort: {{ .Values.reportsWorker.service.port }} + name: {{ $component | lower }} + envFrom: {{- include "enterprise.common.envFrom" . | nindent 12 }} + env: {{- include "enterprise.common.environment" (merge (dict "component" $component) .) | nindent 12 }} + volumeMounts: {{- include "enterprise.common.volumeMounts" (merge (dict "component" $component) .) | nindent 12 }} + livenessProbe: {{- include "enterprise.common.livenessProbe" (merge (dict "component" $component) .) | nindent 12 }} + readinessProbe: {{- include "enterprise.common.readinessProbe" (merge (dict "component" $component) .) | nindent 12 }} + {{- with .Values.reportsWorker.resources }} + resources: {{- toYaml . | nindent 12 }} + {{- end }} + +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "enterprise.reportsWorker.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "enterprise.common.labels" (merge (dict "component" $component) .) | nindent 4 }} + annotations: {{- include "enterprise.common.annotations" (merge (dict "component" $component) .) | nindent 4 }} +spec: + type: {{ .Values.reportsWorker.service.type }} + ports: + - name: {{ $component | lower }} + port: {{ .Values.reportsWorker.service.port }} + targetPort: {{ .Values.reportsWorker.service.port }} + protocol: TCP + {{ include "enterprise.service.nodePort" (merge (dict "component" $component) .) }} + selector: + app.kubernetes.io/name: {{ template "enterprise.fullname" . 
}} + app.kubernetes.io/component: {{ $component | lower }} diff --git a/stable/enterprise/templates/scripts_configmap.yaml b/stable/enterprise/templates/scripts_configmap.yaml new file mode 100644 index 00000000..47416bca --- /dev/null +++ b/stable/enterprise/templates/scripts_configmap.yaml @@ -0,0 +1,9 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: {{ .Release.Name }}-enterprise-scripts + namespace: {{ .Release.Namespace }} + labels: {{- include "enterprise.common.labels" . | nindent 4 }} + annotations: {{- include "enterprise.common.annotations" . | nindent 4}} +data: + {{ .Values.scripts | toYaml | nindent 2 }} diff --git a/stable/enterprise/templates/simplequeue_deployment.yaml b/stable/enterprise/templates/simplequeue_deployment.yaml new file mode 100644 index 00000000..21ab1342 --- /dev/null +++ b/stable/enterprise/templates/simplequeue_deployment.yaml @@ -0,0 +1,70 @@ +{{- $component := "simpleQueue" -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "enterprise.simpleQueue.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "enterprise.common.labels" (merge (dict "component" $component) .) | nindent 4 }} + annotations: {{- include "enterprise.common.annotations" (merge (dict "component" $component) .) | nindent 4 }} +spec: + selector: + matchLabels: {{- include "enterprise.common.matchLabels" (merge (dict "component" $component) .) | nindent 6 }} + replicas: {{ .Values.simpleQueue.replicaCount }} + strategy: + type: Recreate + template: + metadata: + labels: {{- include "enterprise.common.labels" (merge (dict "component" $component) .) | nindent 8 }} + annotations: {{- include "enterprise.common.annotations" (merge (dict "component" $component "nil" true) .) | nindent 8 }} + {{- if and (not .Values.injectSecretsViaEnv) (not .Values.useExistingSecrets) }} + checksum/secrets: {{ include (print $.Template.BasePath "/anchore_secret.yaml") . 
| sha256sum }} + {{- end }} + checksum/enterprise-config: {{ include (print $.Template.BasePath "/anchore_configmap.yaml") . | sha256sum }} + spec: + {{- include "enterprise.common.podSpec" (merge (dict "component" $component) .) | indent 6 }} + volumes: {{- include "enterprise.common.volumes" (merge (dict "component" $component) .) | nindent 8 }} + containers: + {{- if .Values.cloudsql.enabled }} + {{- include "enterprise.common.cloudsqlContainer" . | nindent 8 }} + {{- end }} + - name: {{ .Chart.Name }}-{{ $component | lower }} + image: {{ .Values.image }} + imagePullPolicy: {{ .Values.imagePullPolicy }} + {{- with .Values.containerSecurityContext }} + securityContext: + {{ toYaml . | nindent 12 }} + {{- end }} + command: ["/bin/sh", "-c"] + args: + - {{ print (include "enterprise.common.dockerEntrypoint" .) }} simplequeue + envFrom: {{- include "enterprise.common.envFrom" . | nindent 12 }} + env: {{- include "enterprise.common.environment" (merge (dict "component" $component) .) | nindent 12 }} + ports: + - name: {{ $component | lower }} + containerPort: {{ .Values.simpleQueue.service.port }} + volumeMounts: {{- include "enterprise.common.volumeMounts" (merge (dict "component" $component) .) | nindent 12 }} + livenessProbe: {{- include "enterprise.common.livenessProbe" (merge (dict "component" $component) .) | nindent 12 }} + readinessProbe: {{- include "enterprise.common.readinessProbe" (merge (dict "component" $component) .) | nindent 12 }} + {{- with .Values.simpleQueue.resources }} + resources: {{- toYaml . | nindent 12 }} + {{- end }} + +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "enterprise.simpleQueue.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "enterprise.common.labels" (merge (dict "component" $component) .) | nindent 4 }} + annotations: {{- include "enterprise.common.annotations" (merge (dict "component" $component) .) 
| nindent 4 }} +spec: + type: {{ .Values.simpleQueue.service.type }} + ports: + - name: {{ $component | lower }} + port: {{ .Values.simpleQueue.service.port }} + targetPort: {{ .Values.simpleQueue.service.port }} + protocol: TCP + {{ include "enterprise.service.nodePort" (merge (dict "component" $component) .) }} + selector: + app.kubernetes.io/name: {{ template "enterprise.fullname" . }} + app.kubernetes.io/component: {{ $component | lower }} diff --git a/stable/enterprise/templates/ui_configmap.yaml b/stable/enterprise/templates/ui_configmap.yaml new file mode 100644 index 00000000..6bd7d196 --- /dev/null +++ b/stable/enterprise/templates/ui_configmap.yaml @@ -0,0 +1,46 @@ +{{- $component := "ui" -}} + +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "enterprise.ui.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "enterprise.common.labels" (merge (dict "component" $component) .) | nindent 4 }} + annotations: {{- include "enterprise.common.annotations" (merge (dict "component" $component) .) | nindent 4 }} +data: + config-ui.yaml: | + # Anchore UI configuration + {{- with .Values.anchoreConfig.ui.custom_links }} + custom_links: + title: {{ .title }} + links: + {{- range .links }} + {{- with . }} + - title: {{ .title }} + uri: {{ .uri }} + {{- end }} + {{- end }} + {{- end }} + {{- with .Values.anchoreConfig.ui.enable_add_repositories }} + enable_add_repositories: + admin: {{ .admin }} + standard: {{ .standard }} + {{- end }} + reports_uri: '{{- include "enterprise.setProtocol" . -}}://{{- template "enterprise.api.fullname" . -}}:{{- .Values.api.service.port -}}/v2' + notifications_uri: '{{- include "enterprise.setProtocol" . -}}://{{- template "enterprise.api.fullname" . -}}:{{- .Values.api.service.port -}}/v2' + enterprise_uri: '{{- include "enterprise.setProtocol" . -}}://{{- template "enterprise.api.fullname" . 
-}}:{{- .Values.api.service.port -}}/v2' + # redis_uri: overridden in deployment using the `ANCHORE_REDIS_URI` environment variable + # appdb_uri: overridden in deployment using the `ANCHORE_APPDB_URI` environment variable + license_path: '/home/anchore/' + enable_ssl: {{ .Values.anchoreConfig.ui.enable_ssl }} + enable_proxy: {{ .Values.anchoreConfig.ui.enable_proxy }} + allow_shared_login: {{ .Values.anchoreConfig.ui.enable_shared_login }} + redis_flushdb: {{ .Values.anchoreConfig.ui.redis_flushdb }} + force_websocket: {{ .Values.anchoreConfig.ui.force_websocket }} + authentication_lock: + count: {{ .Values.anchoreConfig.ui.authentication_lock.count }} + expires: {{ .Values.anchoreConfig.ui.authentication_lock.expires }} + appdb_config: {{ toYaml .Values.anchoreConfig.ui.appdb_config | nindent 6}} + log_level: {{ .Values.anchoreConfig.ui.log_level | squote }} + enrich_inventory_view: {{ .Values.anchoreConfig.ui.enrich_inventory_view }} + enable_prometheus_metrics: {{ .Values.anchoreConfig.metrics.enabled }} diff --git a/stable/enterprise/templates/ui_deployment.yaml b/stable/enterprise/templates/ui_deployment.yaml new file mode 100644 index 00000000..dd36c1e2 --- /dev/null +++ b/stable/enterprise/templates/ui_deployment.yaml @@ -0,0 +1,138 @@ +{{- $component := "ui" -}} + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "enterprise.ui.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "enterprise.common.labels" (merge (dict "component" $component) .) | nindent 4 }} + annotations: {{- include "enterprise.common.annotations" (merge (dict "component" $component) .) | nindent 4 }} +spec: + selector: + matchLabels: {{- include "enterprise.common.matchLabels" (merge (dict "component" $component) .) | nindent 6 }} + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: {{- include "enterprise.common.labels" (merge (dict "component" $component) .) 
| nindent 8 }} + annotations: {{- include "enterprise.common.annotations" (merge (dict "component" $component "nil" true) .) | nindent 8 }} + {{- if and (not .Values.injectSecretsViaEnv) (not .Values.useExistingSecrets) }} + checksum/secrets: {{ include (print $.Template.BasePath "/ui_secret.yaml") . | sha256sum }} + {{- end }} + checksum/ui-config: {{ include (print $.Template.BasePath "/ui_configmap.yaml") . | sha256sum }} + spec: + {{- include "enterprise.common.podSpec" (merge (dict "component" $component) .) | indent 6 }} + volumes: {{- include "enterprise.common.extraVolumes" (merge (dict "component" $component) .) | nindent 8 }} + - name: anchore-license + secret: + secretName: {{ .Values.licenseSecretName }} + - name: anchore-ui-config + configMap: + name: {{ template "enterprise.ui.fullname" . }} + {{- with .Values.certStoreSecretName }} + - name: certs + secret: + secretName: {{ . }} + {{- end }} + {{- if .Values.cloudsql.useExistingServiceAcc }} + - name: {{ .Values.cloudsql.serviceAccSecretName }} + secret: + secretName: {{ .Values.cloudsql.serviceAccSecretName }} + {{- end }} + {{- with .Values.extraVolumes }} + {{- toYaml . | nindent 8 }} + {{- end }} + containers: + {{- if .Values.cloudsql.enabled }} + {{- include "enterprise.common.cloudsqlContainer" . | nindent 8 }} + {{- end }} + - name: "{{ .Chart.Name }}-{{ $component | lower }}" + image: {{ .Values.ui.image }} + imagePullPolicy: {{ .Values.ui.imagePullPolicy }} + {{- with .Values.containerSecurityContext }} + securityContext: + {{ toYaml . | nindent 12 }} + {{- end }} + command: ["/bin/sh", "-c"] + args: + - {{ print (include "enterprise.doSourceFile" .) }} /docker-entrypoint.sh node /home/node/aui/build/server.js + env: {{- include "enterprise.common.environment" (merge (dict "component" $component) .) 
| nindent 12 }} + {{- if .Values.anchoreConfig.database.ssl }} + - name: PGSSLROOTCERT + value: /home/anchore/certs/{{ .Values.anchoreConfig.database.sslRootCertFileName }} + {{- end }} + {{- with .Values.ui.ldapsRootCaCertName }} + - name: NODE_EXTRA_CA_CERTS + value: /home/anchore/certs/{{- . }} + {{- end }} + envFrom: + {{- if not .Values.injectSecretsViaEnv }} + {{- if .Values.useExistingSecrets }} + - secretRef: + name: {{ .Values.ui.existingSecretName }} + {{- else }} + - secretRef: + name: {{ template "enterprise.ui.fullname" . }} + {{- end }} + {{- end }} + ports: + - containerPort: 3000 + protocol: TCP + name: {{ $component | lower }} + volumeMounts: {{- include "enterprise.common.extraVolumeMounts" (merge (dict "component" $component) .) | nindent 12 }} + - name: anchore-license + mountPath: /home/anchore/license.yaml + subPath: license.yaml + - name: anchore-ui-config + mountPath: /config/config-ui.yaml + subPath: config-ui.yaml + {{- if (.Values.certStoreSecretName) }} + - name: certs + mountPath: /home/anchore/certs/ + readOnly: true + {{- end }} + {{- with .Values.extraVolumeMounts }} + {{- toYaml . | nindent 12 }} + {{- end }} + livenessProbe: + tcpSocket: + port: {{ $component | lower }} + initialDelaySeconds: {{ .Values.probes.liveness.initialDelaySeconds }} + timeoutSeconds: {{ .Values.probes.liveness.timeoutSeconds }} + periodSeconds: {{ .Values.probes.liveness.periodSeconds }} + failureThreshold: {{ .Values.probes.liveness.failureThreshold }} + successThreshold: {{ .Values.probes.liveness.successThreshold }} + readinessProbe: + httpGet: + path: /service/health + port: {{ $component | lower }} + scheme: HTTP + timeoutSeconds: {{ .Values.probes.readiness.timeoutSeconds }} + periodSeconds: {{ .Values.probes.readiness.periodSeconds }} + failureThreshold: {{ .Values.probes.readiness.failureThreshold }} + successThreshold: {{ .Values.probes.readiness.successThreshold }} + {{- with .Values.ui.resources }} + resources: {{- toYaml . 
| nindent 12 }} + {{- end }} + +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "enterprise.ui.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "enterprise.common.labels" (merge (dict "component" $component) .) | nindent 4 }} + annotations: {{- include "enterprise.common.annotations" (merge (dict "component" $component) .) | nindent 4 }} +spec: + sessionAffinity: {{ .Values.ui.service.sessionAffinity }} + type: {{ .Values.ui.service.type }} + ports: + - name: {{ $component | lower }} + port: {{ .Values.ui.service.port }} + protocol: TCP + targetPort: 3000 + {{ include "enterprise.service.nodePort" (merge (dict "component" $component) .) }} + selector: + app.kubernetes.io/name: {{ template "enterprise.fullname" . }} + app.kubernetes.io/component: {{ $component | lower }} diff --git a/stable/enterprise/templates/ui_secret.yaml b/stable/enterprise/templates/ui_secret.yaml new file mode 100644 index 00000000..15e28a30 --- /dev/null +++ b/stable/enterprise/templates/ui_secret.yaml @@ -0,0 +1,25 @@ +{{- if (not .Values.useExistingSecrets) -}} +{{- $component := "ui" -}} + +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "enterprise.ui.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "enterprise.common.labels" (merge (dict "component" $component) .) | nindent 4 }} + annotations: {{- include "enterprise.common.annotations" (merge (dict "component" $component) .) | nindent 4 }} +type: Opaque +stringData: + +{{- if .Values.anchoreConfig.database.ssl }} + ANCHORE_APPDB_URI: 'postgresql://{{- template "enterprise.ui.dbUser" . -}}:{{- template "enterprise.ui.dbPassword" . -}}@{{ template "enterprise.dbHostname" . }}/{{ index .Values "postgresql" "auth" "database" }}?ssl={{- .Values.anchoreConfig.database.sslMode -}}' +{{- else }} + ANCHORE_APPDB_URI: 'postgresql://{{- template "enterprise.ui.dbUser" . -}}:{{- template "enterprise.ui.dbPassword" . -}}@{{ template "enterprise.dbHostname" . 
}}/{{ index .Values "postgresql" "auth" "database" }}' +{{- end }} + +{{- if and (index .Values "ui-redis" "externalEndpoint") (not (index .Values "ui-redis" "chartEnabled")) }} + ANCHORE_REDIS_URI: '{{ index .Values "ui-redis" "externalEndpoint" }}' +{{- else }} + ANCHORE_REDIS_URI: 'redis://:{{ index .Values "ui-redis" "auth" "password" }}@{{ template "redis.fullname" . }}-master:6379' +{{- end }} +{{- end }} diff --git a/stable/enterprise/test/env_vars.yaml b/stable/enterprise/test/env_vars.yaml new file mode 100644 index 00000000..48a0bd06 --- /dev/null +++ b/stable/enterprise/test/env_vars.yaml @@ -0,0 +1,20 @@ +labels: + foo: bar + +annotations: + foo: bar + +extraEnv: +- name: foo + value: bar + +analyzer: + labels: + test: analyzer + + annotations: + test: analyzer + + extraEnv: + - name: test + value: analyzer diff --git a/stable/enterprise/tests/__snapshot__/configmap_test.yaml.snap b/stable/enterprise/tests/__snapshot__/configmap_test.yaml.snap new file mode 100644 index 00000000..2e1ee3a3 --- /dev/null +++ b/stable/enterprise/tests/__snapshot__/configmap_test.yaml.snap @@ -0,0 +1,531 @@ +should render the configmaps: + 1: | + apiVersion: v1 + data: + analyzer_config.yaml: | + # Anchore analyzer configuration + malware: + clamav: + db_update_enabled: true + enabled: false + retrieve_files: + file_list: + - /etc/passwd + secret_search: + match_params: + - MAXFILESIZE=10000 + - STOREONMATCH=n + regexp_match: + - AWS_ACCESS_KEY=(?i).*aws_access_key_id( *=+ *).*(?&1 > /dev/null) + if [[ !$err ]]; then + echo "Database is ready" + exit 0 + fi + echo "Database is not ready yet, sleeping 10 seconds..." 
+ sleep 10 + done + command: + - /bin/bash + - -c + env: + - name: foo + value: bar + - name: bar + value: baz + - name: ANCHORE_ENDPOINT_HOSTNAME + value: test-release-enterprise-999-upgrade + - name: ANCHORE_PORT + value: "null" + - name: ANCHORE_HOST_ID + valueFrom: + fieldRef: + fieldPath: metadata.name + image: docker.io/anchore/enterprise:v5.4.0 + imagePullPolicy: IfNotPresent + name: wait-for-db diff --git a/stable/enterprise/tests/analyzer_resources_test.yaml b/stable/enterprise/tests/analyzer_resources_test.yaml new file mode 100644 index 00000000..d5ce47a4 --- /dev/null +++ b/stable/enterprise/tests/analyzer_resources_test.yaml @@ -0,0 +1,400 @@ +suite: Analyzer Resources Tests +templates: + - analyzer_configmap.yaml + - analyzer_deployment.yaml + - anchore_secret.yaml + - anchore_configmap.yaml +release: + name: test-release + namespace: test-namespace +chart: + version: 9.9.9 + appVersion: 9.9.9 + +tests: + - it: should render a valid analyzer config file + template: analyzer_configmap.yaml + asserts: + - matchRegex: + path: data["analyzer_config.yaml"] + pattern: (?m)^(retrieve_files|secret_search|content_search|malware):$ + + - it: should set the correct resource names + templates: + - analyzer_deployment.yaml + - analyzer_configmap.yaml + asserts: + - equal: + path: metadata.name + value: test-release-enterprise-analyzer + + - it: should render component labels + templates: + - analyzer_deployment.yaml + - analyzer_configmap.yaml + set: + analyzer.labels: + analyzer: test + test: foobar + asserts: + - isSubset: + path: metadata.labels + content: + analyzer: test + test: foobar + - isSubset: + path: spec.template.metadata.labels + content: + analyzer: test + test: foobar + template: analyzer_deployment.yaml + + - it: should render component annotations + templates: + - analyzer_deployment.yaml + - analyzer_configmap.yaml + set: + analyzer.annotations: + analyzer: test + test: foobar + asserts: + - isSubset: + path: metadata.annotations + content: + 
analyzer: test + test: foobar + - isSubset: + path: spec.template.metadata.annotations + content: + analyzer: test + test: foobar + template: analyzer_deployment.yaml + + - it: should render component matchLabels + template: analyzer_deployment.yaml + asserts: + - isSubset: + path: spec.selector.matchLabels + content: + app.kubernetes.io/name: test-release-enterprise + app.kubernetes.io/component: analyzer + + - it: should render component serviceAccountName + template: analyzer_deployment.yaml + set: + analyzer.serviceAccountName: analyzer-test + asserts: + - equal: + path: spec.template.spec.serviceAccountName + value: analyzer-test + + - it: should render component serviceAccountName even when global serviceAccountName is set + template: analyzer_deployment.yaml + set: + serviceAccountName: global-test + analyzer.serviceAccountName: analyzer-test + asserts: + - equal: + path: spec.template.spec.serviceAccountName + value: analyzer-test + + - it: should render component nodeSelector + template: analyzer_deployment.yaml + set: + analyzer.nodeSelector: + analyzer: test + asserts: + - isSubset: + path: spec.template.spec.nodeSelector + content: + analyzer: test + + - it: should render component affinity + template: analyzer_deployment.yaml + set: + analyzer.affinity: + analyzer: test + asserts: + - isSubset: + path: spec.template.spec.affinity + content: + analyzer: test + + - it: should render component tolerations + template: analyzer_deployment.yaml + set: + analyzer.tolerations: + - key: "analyzer" + operator: "Equal" + value: "test" + effect: "NoSchedule" + asserts: + - contains: + path: spec.template.spec.tolerations + content: + key: "analyzer" + operator: "Equal" + value: "test" + effect: "NoSchedule" + count: 1 + + - it: should render scratch volume details from Analyzer values + template: analyzer_deployment.yaml + set: + analyzer.scratchVolume.details: + csi: + driver: inline.storage.kubernetes.io + volumeAttributes: + foo: bar + asserts: + - contains: + 
path: spec.template.spec.volumes + content: + name: anchore-scratch + csi: + driver: inline.storage.kubernetes.io + volumeAttributes: + foo: bar + count: 1 + + - it: should render scratch volume using details from analyzer values, even when global scratchVolume is explicitly set + template: analyzer_deployment.yaml + set: + analyzer.scratchVolume.details: + hostPath: + path: /tmp + type: Directory + scratchVolume.details: + emptyDir: + medium: Memory + memoryLimit: 128Mi + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: anchore-scratch + hostPath: + path: /tmp + type: Directory + count: 1 + + - it: should render analyzer-configmap volume + template: analyzer_deployment.yaml + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: analyzer-config-volume + configMap: + name: test-release-enterprise-analyzer + count: 1 + + - it: should render component container name + template: analyzer_deployment.yaml + asserts: + - equal: + path: spec.template.spec.containers[0].name + value: enterprise-analyzer + + - it: should render component entrypoint args + template: analyzer_deployment.yaml + asserts: + - matchRegex: + path: spec.template.spec.containers[0].args[0] + pattern: ^/docker-entrypoint\.sh anchore-enterprise-manager service start --no-auto-upgrade analyzer$ + count: 1 + + - it: should render component environment variables + template: analyzer_deployment.yaml + set: + analyzer.extraEnv: + - name: analyzer + value: test + - name: test + value: foobar + asserts: + - contains: + path: spec.template.spec.containers[0].env + content: + name: ANCHORE_ENDPOINT_HOSTNAME + value: test-release-enterprise-analyzer + count: 1 + - contains: + path: spec.template.spec.containers[0].env + content: + name: ANCHORE_PORT + value: "8084" + count: 1 + - contains: + path: spec.template.spec.containers[0].env + content: + name: analyzer + value: test + count: 1 + - contains: + path: spec.template.spec.containers[0].env + content: + 
name: test + value: foobar + + - it: should render component ports + template: analyzer_deployment.yaml + asserts: + - contains: + path: spec.template.spec.containers[0].ports + content: + name: analyzer + containerPort: 8084 + count: 1 + + - it: should render component volumeMounts + template: analyzer_deployment.yaml + asserts: + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: analyzer-config-volume + mountPath: /anchore_service/analyzer_config.yaml + subPath: analyzer_config.yaml + count: 1 + any: true + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: anchore-scratch + mountPath: /analysis_scratch + count: 1 + any: true + + - it: should render component extraVolumes + template: analyzer_deployment.yaml + set: + analyzer.extraVolumes: + - name: extra-vol + emptyDir: {} + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: extra-vol + emptyDir: {} + count: 1 + + - it: should render component extraVolumeMounts + template: analyzer_deployment.yaml + set: + analyzer.extraVolumeMounts: + - name: extra-vol + mountPath: /mnt/extra-vol + readOnly: false + asserts: + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: extra-vol + mountPath: /mnt/extra-vol + readOnly: false + count: 1 + + - it: should render component and global extraVolumes + template: analyzer_deployment.yaml + set: + analyzer.extraVolumes: + - name: extra-vol + emptyDir: {} + extraVolumes: + - name: global-extra-vol + emptyDir: {} + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: extra-vol + emptyDir: {} + count: 1 + - contains: + path: spec.template.spec.volumes + content: + name: global-extra-vol + emptyDir: {} + count: 1 + + - it: should render component and global extraVolumeMounts + template: analyzer_deployment.yaml + set: + analyzer.extraVolumeMounts: + - name: extra-vol + mountPath: /mnt/extra-vol + readOnly: false + extraVolumeMounts: + 
- name: global-extra-vol + mountPath: /mnt/global-extra-vol + readOnly: false + asserts: + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: extra-vol + mountPath: /mnt/extra-vol + readOnly: false + count: 1 + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: global-extra-vol + mountPath: /mnt/global-extra-vol + readOnly: false + count: 1 + + - it: should render component probes + template: analyzer_deployment.yaml + asserts: + - isSubset: + path: spec.template.spec.containers[0].livenessProbe + content: + httpGet: + path: /health + port: analyzer + scheme: HTTP + initialDelaySeconds: 120 + timeoutSeconds: 10 + periodSeconds: 10 + failureThreshold: 6 + successThreshold: 1 + count: 1 + - isSubset: + path: spec.template.spec.containers[0].readinessProbe + content: + httpGet: + path: /health + port: analyzer + scheme: HTTP + timeoutSeconds: 10 + periodSeconds: 10 + failureThreshold: 3 + successThreshold: 1 + count: 1 + + - it: should render component resource requests & limits + template: analyzer_deployment.yaml + set: + analyzer.resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 200m + memory: 256Mi + asserts: + - isSubset: + path: spec.template.spec.containers[0].resources.requests + content: + cpu: 100m + memory: 128Mi + count: 1 + - isSubset: + path: spec.template.spec.containers[0].resources.limits + content: + cpu: 200m + memory: 256Mi + count: 1 diff --git a/stable/enterprise/tests/api_resources_test.yaml b/stable/enterprise/tests/api_resources_test.yaml new file mode 100644 index 00000000..d67029a1 --- /dev/null +++ b/stable/enterprise/tests/api_resources_test.yaml @@ -0,0 +1,459 @@ +suite: API Resources Tests +templates: + - api_deployment.yaml + - anchore_secret.yaml + - anchore_configmap.yaml + - policybundle_configmap.yaml +release: + name: test-release + namespace: test-namespace +chart: + version: 9.9.9 + appVersion: 9.9.9 + +tests: + - it: should set the correct 
resource names + template: api_deployment.yaml + asserts: + - equal: + path: metadata.name + value: test-release-enterprise-api + + - it: should render component labels + template: api_deployment.yaml + set: + api.labels: + api: test + test: foobar + asserts: + - isSubset: + path: metadata.labels + content: + api: test + test: foobar + - isSubset: + path: spec.template.metadata.labels + content: + api: test + test: foobar + template: api_deployment.yaml + documentIndex: 0 + + - it: should render component annotations + template: api_deployment.yaml + set: + api.annotations: + api: test + test: foobar + asserts: + - isSubset: + path: metadata.annotations + content: + api: test + test: foobar + - isSubset: + path: spec.template.metadata.annotations + content: + api: test + test: foobar + template: api_deployment.yaml + documentIndex: 0 + + - it: should render component matchLabels + template: api_deployment.yaml + documentIndex: 0 + asserts: + - isSubset: + path: spec.selector.matchLabels + content: + app.kubernetes.io/name: test-release-enterprise + app.kubernetes.io/component: api + + - it: should render component replica count + template: api_deployment.yaml + documentIndex: 0 + set: + api.replicaCount: 2 + asserts: + - equal: + path: spec.replicas + value: 2 + + - it: should render component serviceAccountName + template: api_deployment.yaml + documentIndex: 0 + set: + api.serviceAccountName: api-test + asserts: + - equal: + path: spec.template.spec.serviceAccountName + value: api-test + + - it: should render component serviceAccountName even when global serviceAccountName is set + template: api_deployment.yaml + documentIndex: 0 + set: + serviceAccountName: global-test + api.serviceAccountName: api-test + asserts: + - equal: + path: spec.template.spec.serviceAccountName + value: api-test + + - it: should render component nodeSelector + template: api_deployment.yaml + documentIndex: 0 + set: + api.nodeSelector: + api: test + asserts: + - isSubset: + path: 
spec.template.spec.nodeSelector + content: + api: test + + - it: should render component affinity + template: api_deployment.yaml + documentIndex: 0 + set: + api.affinity: + api: test + asserts: + - isSubset: + path: spec.template.spec.affinity + content: + api: test + + - it: should render component tolerations + template: api_deployment.yaml + documentIndex: 0 + set: + api.tolerations: + - key: "api" + operator: "Equal" + value: "test" + effect: "NoSchedule" + asserts: + - contains: + path: spec.template.spec.tolerations + content: + key: "api" + operator: "Equal" + value: "test" + effect: "NoSchedule" + count: 1 + + - it: should render policy bundle volume when anchoreConfig.policyBundles is set + template: api_deployment.yaml + documentIndex: 0 + set: + anchoreConfig.policyBundles: + custom_policy_bundle1.json: {} + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: policy-bundle-volume + configMap: + name: test-release-enterprise-policy-bundles + count: 1 + + - it: should render component container name + template: api_deployment.yaml + documentIndex: 0 + asserts: + - equal: + path: spec.template.spec.containers[0].name + value: enterprise-api + + - it: should render component entrypoint args + template: api_deployment.yaml + documentIndex: 0 + asserts: + - matchRegex: + path: spec.template.spec.containers[0].args[0] + pattern: ^/docker-entrypoint\.sh anchore-enterprise-manager service start --no-auto-upgrade apiext$ + count: 1 + + - it: should render api component environment variables + template: api_deployment.yaml + documentIndex: 0 + set: + api.extraEnv: + - name: api + value: test + - name: test + value: foobar + asserts: + - contains: + path: spec.template.spec.containers[0].env + content: + name: ANCHORE_CLI_PASS + valueFrom: + secretKeyRef: + name: test-release-enterprise + key: ANCHORE_ADMIN_PASSWORD + count: 1 + - contains: + path: spec.template.spec.containers[0].env + content: + name: ANCHORE_ENDPOINT_HOSTNAME + value: 
test-release-enterprise-api + count: 1 + - contains: + path: spec.template.spec.containers[0].env + content: + name: ANCHORE_PORT + value: "8228" + count: 1 + - contains: + path: spec.template.spec.containers[0].env + content: + name: api + value: test + count: 1 + - contains: + path: spec.template.spec.containers[0].env + content: + name: test + value: foobar + + - it: should render get cli pass ENV VAR from existing secret if useExistingSecrets=true + template: api_deployment.yaml + documentIndex: 0 + set: + useExistingSecrets: true + existingSecretName: existing-secret + asserts: + - contains: + path: spec.template.spec.containers[0].env + content: + name: ANCHORE_CLI_PASS + valueFrom: + secretKeyRef: + name: existing-secret + key: ANCHORE_ADMIN_PASSWORD + + - it: should render component ports + template: api_deployment.yaml + documentIndex: 0 + asserts: + - contains: + path: spec.template.spec.containers[0].ports + content: + name: api + containerPort: 8228 + count: 1 + + - it: should render policy bundle volumeMounts when a policy bundle is provided + template: api_deployment.yaml + documentIndex: 0 + set: + anchoreConfig.policyBundles: + custom_policy_bundle1.json: {} + asserts: + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: policy-bundle-volume + mountPath: /anchore_service/policies/custom_policy_bundle1.json + subPath: custom_policy_bundle1.json + count: 1 + any: true + + - it: should render policy bundle volumeMounts when multiple policy bundles are provided + template: api_deployment.yaml + documentIndex: 0 + set: + anchoreConfig.policyBundles: + custom_policy_bundle1.json: {} + custom_policy_bundle2.json: {} + asserts: + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: policy-bundle-volume + mountPath: /anchore_service/policies/custom_policy_bundle1.json + subPath: custom_policy_bundle1.json + count: 1 + any: true + - contains: + path: 
spec.template.spec.containers[0].volumeMounts + content: + name: policy-bundle-volume + mountPath: /anchore_service/policies/custom_policy_bundle2.json + subPath: custom_policy_bundle2.json + count: 1 + any: true + + - it: should render component extraVolumes + template: api_deployment.yaml + documentIndex: 0 + set: + api.extraVolumes: + - name: extra-vol + emptyDir: {} + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: extra-vol + emptyDir: {} + count: 1 + + - it: should render component extraVolumeMounts + template: api_deployment.yaml + documentIndex: 0 + set: + api.extraVolumeMounts: + - name: extra-vol + mountPath: /mnt/extra-vol + readOnly: false + asserts: + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: extra-vol + mountPath: /mnt/extra-vol + readOnly: false + count: 1 + + - it: should render component and global extraVolumes + template: api_deployment.yaml + documentIndex: 0 + set: + api.extraVolumes: + - name: extra-vol + emptyDir: {} + extraVolumes: + - name: global-extra-vol + emptyDir: {} + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: extra-vol + emptyDir: {} + count: 1 + - contains: + path: spec.template.spec.volumes + content: + name: global-extra-vol + emptyDir: {} + count: 1 + + - it: should render component and global extraVolumeMounts + template: api_deployment.yaml + documentIndex: 0 + set: + api.extraVolumeMounts: + - name: extra-vol + mountPath: /mnt/extra-vol + readOnly: false + extraVolumeMounts: + - name: global-extra-vol + mountPath: /mnt/global-extra-vol + readOnly: false + asserts: + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: extra-vol + mountPath: /mnt/extra-vol + readOnly: false + count: 1 + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: global-extra-vol + mountPath: /mnt/global-extra-vol + readOnly: false + count: 1 + + - it: should render component probes + 
template: api_deployment.yaml + documentIndex: 0 + asserts: + - isSubset: + path: spec.template.spec.containers[0].livenessProbe + content: + httpGet: + path: /health + port: api + scheme: HTTP + initialDelaySeconds: 120 + timeoutSeconds: 10 + periodSeconds: 10 + failureThreshold: 6 + successThreshold: 1 + count: 1 + - isSubset: + path: spec.template.spec.containers[0].readinessProbe + content: + httpGet: + path: /health + port: api + scheme: HTTP + timeoutSeconds: 10 + periodSeconds: 10 + failureThreshold: 3 + successThreshold: 1 + count: 1 + + - it: should render component resource requests & limits + template: api_deployment.yaml + documentIndex: 0 + set: + api.resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 200m + memory: 256Mi + asserts: + - isSubset: + path: spec.template.spec.containers[0].resources.requests + content: + cpu: 100m + memory: 128Mi + count: 1 + - isSubset: + path: spec.template.spec.containers[0].resources.limits + content: + cpu: 200m + memory: 256Mi + count: 1 + + - it: should render component service type + template: api_deployment.yaml + documentIndex: 1 + asserts: + - equal: + path: spec.type + value: ClusterIP + count: 1 + + - it: should render component service ports + template: api_deployment.yaml + documentIndex: 1 + asserts: + - contains: + path: spec.ports + content: + name: api + port: 8228 + targetPort: 8228 + protocol: TCP + count: 1 + + - it: should render component service selectors + template: api_deployment.yaml + documentIndex: 1 + asserts: + - isSubset: + path: spec.selector + content: + app.kubernetes.io/name: test-release-enterprise + app.kubernetes.io/component: api + count: 1 diff --git a/stable/enterprise/tests/catalog_resources_test.yaml b/stable/enterprise/tests/catalog_resources_test.yaml new file mode 100644 index 00000000..34e2f7c6 --- /dev/null +++ b/stable/enterprise/tests/catalog_resources_test.yaml @@ -0,0 +1,472 @@ +suite: Catalog Resources Tests +templates: + - catalog_deployment.yaml + - 
anchore_secret.yaml + - anchore_configmap.yaml + - policybundle_configmap.yaml +release: + name: test-release + namespace: test-namespace +chart: + version: 9.9.9 + appVersion: 9.9.9 + +tests: + - it: should set the correct resource names + template: catalog_deployment.yaml + asserts: + - equal: + path: metadata.name + value: test-release-enterprise-catalog + + - it: should render component labels + template: catalog_deployment.yaml + set: + catalog.labels: + catalog: test + test: foobar + asserts: + - isSubset: + path: metadata.labels + content: + catalog: test + test: foobar + - isSubset: + path: spec.template.metadata.labels + content: + catalog: test + test: foobar + template: catalog_deployment.yaml + documentIndex: 0 + + - it: should render component annotations + template: catalog_deployment.yaml + set: + catalog.annotations: + catalog: test + test: foobar + asserts: + - isSubset: + path: metadata.annotations + content: + catalog: test + test: foobar + - isSubset: + path: spec.template.metadata.annotations + content: + catalog: test + test: foobar + template: catalog_deployment.yaml + documentIndex: 0 + + - it: should render component matchLabels + template: catalog_deployment.yaml + documentIndex: 0 + asserts: + - isSubset: + path: spec.selector.matchLabels + content: + app.kubernetes.io/name: test-release-enterprise + app.kubernetes.io/component: catalog + + - it: should render component replica count + template: catalog_deployment.yaml + documentIndex: 0 + set: + catalog.replicaCount: 2 + asserts: + - equal: + path: spec.replicas + value: 2 + + - it: should render component serviceAccountName + template: catalog_deployment.yaml + documentIndex: 0 + set: + catalog.serviceAccountName: catalog-test + asserts: + - equal: + path: spec.template.spec.serviceAccountName + value: catalog-test + + - it: should render component serviceAccountName even when global serviceAccountName is set + template: catalog_deployment.yaml + documentIndex: 0 + set: + 
serviceAccountName: global-test + catalog.serviceAccountName: catalog-test + asserts: + - equal: + path: spec.template.spec.serviceAccountName + value: catalog-test + + - it: should render component nodeSelector + template: catalog_deployment.yaml + documentIndex: 0 + set: + catalog.nodeSelector: + catalog: test + asserts: + - isSubset: + path: spec.template.spec.nodeSelector + content: + catalog: test + + - it: should render component affinity + template: catalog_deployment.yaml + documentIndex: 0 + set: + catalog.affinity: + catalog: test + asserts: + - isSubset: + path: spec.template.spec.affinity + content: + catalog: test + + - it: should render component tolerations + template: catalog_deployment.yaml + documentIndex: 0 + set: + catalog.tolerations: + - key: "catalog" + operator: "Equal" + value: "test" + effect: "NoSchedule" + asserts: + - contains: + path: spec.template.spec.tolerations + content: + key: "catalog" + operator: "Equal" + value: "test" + effect: "NoSchedule" + count: 1 + + - it: should render scratch volume details from catalog values + template: catalog_deployment.yaml + documentIndex: 0 + set: + catalog.scratchVolume.details: + csi: + driver: inline.storage.kubernetes.io + volumeAttributes: + foo: bar + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: anchore-scratch + csi: + driver: inline.storage.kubernetes.io + volumeAttributes: + foo: bar + count: 1 + + - it: should render scratch volume using details from catalog values, even when global scratchVolume is explicitly set + template: catalog_deployment.yaml + documentIndex: 0 + set: + catalog.scratchVolume.details: + hostPath: + path: /tmp + type: Directory + scratchVolume.details: + emptyDir: + medium: Memory + memoryLimit: 128Mi + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: anchore-scratch + hostPath: + path: /tmp + type: Directory + count: 1 + + - it: should render policy bundle volume when anchoreConfig.policyBundles is set + 
template: catalog_deployment.yaml + documentIndex: 0 + set: + anchoreConfig.policyBundles: + custom_policy_bundle1.json: {} + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: policy-bundle-volume + configMap: + name: test-release-enterprise-policy-bundles + count: 1 + + - it: should render component container name + template: catalog_deployment.yaml + documentIndex: 0 + asserts: + - equal: + path: spec.template.spec.containers[0].name + value: enterprise-catalog + + - it: should render component entrypoint args + template: catalog_deployment.yaml + documentIndex: 0 + asserts: + - matchRegex: + path: spec.template.spec.containers[0].args[0] + pattern: ^/docker-entrypoint\.sh anchore-enterprise-manager service start --no-auto-upgrade catalog$ + count: 1 + + - it: should render catalog component environment variables + template: catalog_deployment.yaml + documentIndex: 0 + set: + catalog.extraEnv: + - name: catalog + value: test + - name: test + value: foobar + asserts: + - contains: + path: spec.template.spec.containers[0].env + content: + name: ANCHORE_ENDPOINT_HOSTNAME + value: test-release-enterprise-catalog + count: 1 + - contains: + path: spec.template.spec.containers[0].env + content: + name: ANCHORE_PORT + value: "8082" + count: 1 + - contains: + path: spec.template.spec.containers[0].env + content: + name: catalog + value: test + count: 1 + - contains: + path: spec.template.spec.containers[0].env + content: + name: test + value: foobar + + - it: should render component ports + template: catalog_deployment.yaml + documentIndex: 0 + asserts: + - contains: + path: spec.template.spec.containers[0].ports + content: + name: catalog + containerPort: 8082 + count: 1 + + - it: should render component volumeMounts + template: catalog_deployment.yaml + documentIndex: 0 + asserts: + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: anchore-scratch + mountPath: /analysis_scratch + count: 1 + any: true + + - 
it: should render policy bundle volumeMounts when multiple policy bundles are provided + template: catalog_deployment.yaml + documentIndex: 0 + set: + anchoreConfig.policyBundles: + custom_policy_bundle1.json: {} + custom_policy_bundle2.json: {} + asserts: + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: policy-bundle-volume + mountPath: /anchore_service/policies/custom_policy_bundle1.json + subPath: custom_policy_bundle1.json + count: 1 + any: true + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: policy-bundle-volume + mountPath: /anchore_service/policies/custom_policy_bundle2.json + subPath: custom_policy_bundle2.json + count: 1 + any: true + + - it: should render component extraVolumes + template: catalog_deployment.yaml + documentIndex: 0 + set: + catalog.extraVolumes: + - name: extra-vol + emptyDir: {} + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: extra-vol + emptyDir: {} + count: 1 + + - it: should render component extraVolumeMounts + template: catalog_deployment.yaml + documentIndex: 0 + set: + catalog.extraVolumeMounts: + - name: extra-vol + mountPath: /mnt/extra-vol + readOnly: false + asserts: + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: extra-vol + mountPath: /mnt/extra-vol + readOnly: false + count: 1 + + - it: should render component and global extraVolumes + template: catalog_deployment.yaml + documentIndex: 0 + set: + catalog.extraVolumes: + - name: extra-vol + emptyDir: {} + extraVolumes: + - name: global-extra-vol + emptyDir: {} + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: extra-vol + emptyDir: {} + count: 1 + - contains: + path: spec.template.spec.volumes + content: + name: global-extra-vol + emptyDir: {} + count: 1 + + - it: should render component and global extraVolumeMounts + template: catalog_deployment.yaml + documentIndex: 0 + set: + 
catalog.extraVolumeMounts: + - name: extra-vol + mountPath: /mnt/extra-vol + readOnly: false + extraVolumeMounts: + - name: global-extra-vol + mountPath: /mnt/global-extra-vol + readOnly: false + asserts: + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: extra-vol + mountPath: /mnt/extra-vol + readOnly: false + count: 1 + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: global-extra-vol + mountPath: /mnt/global-extra-vol + readOnly: false + count: 1 + + - it: should render component probes + template: catalog_deployment.yaml + documentIndex: 0 + asserts: + - isSubset: + path: spec.template.spec.containers[0].livenessProbe + content: + httpGet: + path: /health + port: catalog + scheme: HTTP + initialDelaySeconds: 120 + timeoutSeconds: 10 + periodSeconds: 10 + failureThreshold: 6 + successThreshold: 1 + count: 1 + - isSubset: + path: spec.template.spec.containers[0].readinessProbe + content: + httpGet: + path: /health + port: catalog + scheme: HTTP + timeoutSeconds: 10 + periodSeconds: 10 + failureThreshold: 3 + successThreshold: 1 + count: 1 + + - it: should render component resource requests & limits + template: catalog_deployment.yaml + documentIndex: 0 + set: + catalog.resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 200m + memory: 256Mi + asserts: + - isSubset: + path: spec.template.spec.containers[0].resources.requests + content: + cpu: 100m + memory: 128Mi + count: 1 + - isSubset: + path: spec.template.spec.containers[0].resources.limits + content: + cpu: 200m + memory: 256Mi + count: 1 + + - it: should render component service type + template: catalog_deployment.yaml + documentIndex: 1 + asserts: + - equal: + path: spec.type + value: ClusterIP + count: 1 + + - it: should render component service ports + template: catalog_deployment.yaml + documentIndex: 1 + asserts: + - contains: + path: spec.ports + content: + name: catalog + port: 8082 + targetPort: 8082 + protocol: 
TCP + count: 1 + + - it: should render component service selectors + template: catalog_deployment.yaml + documentIndex: 1 + asserts: + - isSubset: + path: spec.selector + content: + app.kubernetes.io/name: test-release-enterprise + app.kubernetes.io/component: catalog + count: 1 diff --git a/stable/enterprise/tests/common_helpers_test.yaml b/stable/enterprise/tests/common_helpers_test.yaml new file mode 100644 index 00000000..5bf220bb --- /dev/null +++ b/stable/enterprise/tests/common_helpers_test.yaml @@ -0,0 +1,1080 @@ +suite: Common Helper Template Tests +templates: + - analyzer_deployment.yaml + - api_deployment.yaml + - catalog_deployment.yaml + - notifications_deployment.yaml + - policyengine_deployment.yaml + - reports_deployment.yaml + - reportsworker_deployment.yaml + - simplequeue_deployment.yaml + - ui_deployment.yaml + - hooks/pre-upgrade/upgrade_job.yaml + - analyzer_configmap.yaml + - anchore_configmap.yaml + - ui_configmap.yaml + - anchore_secret.yaml + - ui_secret.yaml +release: + name: test-release + namespace: test-namespace +chart: + version: 9.9.9 + appVersion: 9.9.9 + +backend_test_templates: &backend_test_templates + - analyzer_deployment.yaml + - api_deployment.yaml + - catalog_deployment.yaml + - notifications_deployment.yaml + - policyengine_deployment.yaml + - reports_deployment.yaml + - reportsworker_deployment.yaml + - simplequeue_deployment.yaml + +test_templates: &test_templates + - analyzer_deployment.yaml + - api_deployment.yaml + - catalog_deployment.yaml + - notifications_deployment.yaml + - policyengine_deployment.yaml + - reports_deployment.yaml + - reportsworker_deployment.yaml + - simplequeue_deployment.yaml + - ui_deployment.yaml + - hooks/pre-upgrade/upgrade_job.yaml + +deployment_templates: &deployment_templates + - analyzer_deployment.yaml + - api_deployment.yaml + - catalog_deployment.yaml + - notifications_deployment.yaml + - policyengine_deployment.yaml + - reports_deployment.yaml + - reportsworker_deployment.yaml + - 
simplequeue_deployment.yaml + - ui_deployment.yaml + +tests: + - it: should render global annotations + templates: *test_templates + documentIndex: 0 + set: + annotations: + foo: bar + bar: baz + asserts: + - isSubset: + path: metadata.annotations + content: + foo: bar + bar: baz + + - it: should render file checksum/secret annotation + templates: *test_templates + documentIndex: 0 + asserts: + - exists: + path: spec.template.metadata.annotations['checksum/secrets'] + + - it: should render global labels + templates: *test_templates + documentIndex: 0 + set: + labels: + foo: bar + bar: baz + asserts: + - isSubset: + path: metadata.labels + content: + app.kubernetes.io/instance: test-release + helm.sh/chart: enterprise-9.9.9 + app.kubernetes.io/version: 9.9.9 + app.kubernetes.io/part-of: anchore + app.kubernetes.io/managed-by: Helm + foo: bar + bar: baz + + - it: should render docker entrypoint with no doSourceAtEntry filePaths + templates: *backend_test_templates + documentIndex: 0 + asserts: + - matchRegex: + path: spec.template.spec.containers[0].args[0] + pattern: ^\/docker-entrypoint\.sh anchore-enterprise-manager service start --no-auto-upgrade + + - it: should render docker entrypoint with doSourceAtEntry and no filePaths + templates: *backend_test_templates + documentIndex: 0 + set: + doSourceAtEntry.enabled: true + asserts: + - matchRegex: + path: spec.template.spec.containers[0].args[0] + pattern: ^/docker-entrypoint\.sh anchore-enterprise-manager service start --no-auto-upgrade + + - it: should render docker entrypoint with doSourceAtEntry and some filePaths + templates: *test_templates + documentIndex: 0 + set: + doSourceAtEntry.enabled: true + doSourceAtEntry.filePaths: ["myscript.sh", "myotherscript.sh"] + asserts: + - matchRegex: + path: spec.template.spec.containers[0].args[0] + pattern: ^if \[ -f myscript\.sh \];then source myscript\.sh;fi;if \[ -f myotherscript\.sh \];then source myotherscript\.sh;fi; .*$ + + - it: should render envFrom without an 
existing secret + templates: + - analyzer_deployment.yaml + - api_deployment.yaml + - catalog_deployment.yaml + - notifications_deployment.yaml + - policyengine_deployment.yaml + - reports_deployment.yaml + - simplequeue_deployment.yaml + - hooks/pre-upgrade/upgrade_job.yaml + documentIndex: 0 + asserts: + - isSubset: + path: spec.template.spec.containers[0] + content: + envFrom: + - configMapRef: + name: test-release-enterprise-config-env-vars + - secretRef: + name: test-release-enterprise + + - it: should render envFrom with an existing secret + set: + useExistingSecrets: true + existingSecretName: "myExistingSecretName" + templates: + - analyzer_deployment.yaml + - api_deployment.yaml + - catalog_deployment.yaml + - notifications_deployment.yaml + - policyengine_deployment.yaml + - reports_deployment.yaml + - simplequeue_deployment.yaml + - hooks/pre-upgrade/upgrade_job.yaml + documentIndex: 0 + asserts: + - isSubset: + path: spec.template.spec.containers[0] + content: + envFrom: + - configMapRef: + name: test-release-enterprise-config-env-vars + - secretRef: + name: myExistingSecretName + + - it: should not render secretRef when injecting secrets via env + set: + injectSecretsViaEnv: true + useExistingSecrets: true + existingSecretName: "myExistingSecretName" + templates: + - analyzer_deployment.yaml + - api_deployment.yaml + - catalog_deployment.yaml + - notifications_deployment.yaml + - policyengine_deployment.yaml + - reports_deployment.yaml + - simplequeue_deployment.yaml + - hooks/pre-upgrade/upgrade_job.yaml + documentIndex: 0 + asserts: + - isNotSubset: + path: spec.template.spec.containers[0] + content: + envFrom: + - configMapRef: + name: test-release-enterprise-config-env-vars + - secretRef: + name: myExistingSecretName + + - it: should render correct environment variables when extraEnv is set + templates: *test_templates + documentIndex: 0 + set: + extraEnv: + - name: foo + value: bar + - name: bar + value: baz + asserts: + - contains: + path: 
spec.template.spec.containers[0].env + content: + name: foo + value: bar + - contains: + path: spec.template.spec.containers[0].env + content: + name: bar + value: baz + - contains: + path: spec.template.spec.containers[0].env + content: + name: ANCHORE_HOST_ID + valueFrom: + fieldRef: + fieldPath: metadata.name + + - it: should render clouddsql container for all deployments and default upgrade job + templates: *test_templates + documentIndex: 0 + set: + cloudsql.enabled: true + asserts: + - contains: + path: spec.template.spec.containers + content: + name: cloudsql-proxy + command: + - /cloud_sql_proxy + args: + - "-instances==tcp:5432" + count: 1 + any: true + + - it: should render clouddsql container and extraArgs for all deployments and default upgrade job + templates: *test_templates + documentIndex: 0 + set: + cloudsql.enabled: true + cloudsql.extraArgs: + - myExtraArg + - myOtherExtraArg + asserts: + - equal: + path: spec.template.spec.containers[0].name + value: cloudsql-proxy + count: 1 + - equal: + path: spec.template.spec.containers[0].command + value: [ /cloud_sql_proxy ] + count: 1 + - equal: + path: spec.template.spec.containers[0].args + value: + - "-instances==tcp:5432" + - "myExtraArg" + - "myOtherExtraArg" + count: 1 + + - it: should render clouddsql container, additional arg, and volumeMount when useExistingServiceAcc is true for all deployments and default upgrade job + templates: *test_templates + documentIndex: 0 + set: + cloudsql.enabled: true + cloudsql.extraArgs: + - myExtraArg + - myOtherExtraArg + cloudsql.useExistingServiceAcc: true + cloudsql.serviceAccSecretName: cloudsqlServiceAccSecretName + cloudsql.serviceAccJsonName: cloudsqlServiceAccJsonName + asserts: + - equal: + path: spec.template.spec.containers[0].name + value: cloudsql-proxy + count: 1 + - equal: + path: spec.template.spec.containers[0].args + value: + - "-instances==tcp:5432" + - "myExtraArg" + - "myOtherExtraArg" + - 
"-credential_file=/var/cloudsqlServiceAccSecretName/cloudsqlServiceAccJsonName" + count: 1 + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + mountPath: "/var/cloudsqlServiceAccSecretName" + name: "cloudsqlServiceAccSecretName" + readOnly: true + count: 1 + - contains: + path: spec.template.spec.volumes + content: + name: "cloudsqlServiceAccSecretName" + secret: + secretName: "cloudsqlServiceAccSecretName" + count: 1 + + - it: should render fixPermissionsInitContainer + set: + scratchVolume.fixGroupPermissions: true + securityContext.fsGroup: 9999 + templates: + - analyzer_deployment.yaml + - catalog_deployment.yaml + - policyengine_deployment.yaml + documentIndex: 0 + asserts: + - contains: + path: spec.template.spec.initContainers + content: + name: mode-fixer + volumeMounts: + - name: "anchore-scratch" + mountPath: /analysis_scratch + command: [ sh, -c, (chmod 0775 /analysis_scratch; chgrp 9999 /analysis_scratch ) ] + count: 1 + any: true + + - it: should not render fixPermissionsInitContainer + set: + scratchVolume.fixGroupPermissions: false + securityContext.fsGroup: 9999 + templates: + - analyzer_deployment.yaml + - catalog_deployment.yaml + - policyengine_deployment.yaml + documentIndex: 0 + asserts: + - notExists: + path: spec.template.spec.initContainers + + - it: should render liveness probe + templates: *deployment_templates + documentIndex: 0 + asserts: + - isSubset: + path: spec.template.spec.containers[0].livenessProbe + content: + initialDelaySeconds: 120 + timeoutSeconds: 10 + periodSeconds: 10 + failureThreshold: 6 + successThreshold: 1 + + - it: should render readiness probe + templates: *deployment_templates + documentIndex: 0 + asserts: + - isSubset: + path: spec.template.spec.containers[0].readinessProbe + content: + timeoutSeconds: 10 + periodSeconds: 10 + failureThreshold: 3 + successThreshold: 1 + + - it: should render pod security contexts + set: + securityContext.runAsUser: 9999 + templates: 
*deployment_templates + documentIndex: 0 + asserts: + - isSubset: + path: spec.template.spec.securityContext + content: + runAsUser: 9999 + runAsGroup: 1000 + fsGroup: 1000 + + - it: should render pod serviceAccountName + set: + serviceAccountName: myServiceAccountName + templates: *deployment_templates + documentIndex: 0 + asserts: + - equal: + path: spec.template.spec.serviceAccountName + value: myServiceAccountName + + - it: should not render pod serviceAccountName if not defined + templates: *deployment_templates + documentIndex: 0 + asserts: + - notExists: + path: spec.template.spec.serviceAccountName + + - it: should render imagePullSecretName + templates: *test_templates + documentIndex: 0 + asserts: + - equal: + path: spec.template.spec.imagePullSecrets[0].name + value: anchore-enterprise-pullcreds + + - it: should render set imagePullSecretName + set: + imagePullSecretName: mysecret + templates: *test_templates + documentIndex: 0 + asserts: + - equal: + path: spec.template.spec.imagePullSecrets[0].name + value: mysecret + + - it: should render container security contexts + set: + containerSecurityContext.runAsUser: 9999 + containerSecurityContext.runAsGroup: 9998 + templates: *deployment_templates + documentIndex: 0 + asserts: + - isSubset: + path: spec.template.spec.containers[0].securityContext + content: + runAsUser: 9999 + runAsGroup: 9998 + + - it: should not render container security contexts if none set + templates: *deployment_templates + documentIndex: 0 + asserts: + - notExists: + path: spec.template.spec.containers[0].securityContext + + - it: should render volumeMounts + templates: *backend_test_templates + documentIndex: 0 + asserts: + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: anchore-license + mountPath: /home/anchore/license.yaml + subPath: license.yaml + count: 1 + any: true + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: config-volume + mountPath: 
/config/config.yaml + subPath: config.yaml + count: 1 + any: true + + - it: should render extraVolumeMounts + set: + extraVolumeMounts: + - name: my-volume + mountPath: /my/path + subPath: my-subpath + - name: my-volume-two + mountPath: /my/other/path + subPath: my-other-subpath + templates: *deployment_templates + documentIndex: 0 + asserts: + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: my-volume + mountPath: /my/path + subPath: my-subpath + count: 1 + any: true + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: my-volume-two + mountPath: /my/other/path + subPath: my-other-subpath + count: 1 + any: true + - notContains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: certs + count: 1 + any: true + + - it: should render emptyDir scratchVolume if no global or component specific values are set + templates: + - analyzer_deployment.yaml + - catalog_deployment.yaml + - policyengine_deployment.yaml + documentIndex: 0 + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: anchore-scratch + emptyDir: {} + count: 1 + any: true + + - it: should render global scratchVolume if no component specific values are set + templates: + - analyzer_deployment.yaml + - catalog_deployment.yaml + - policyengine_deployment.yaml + set: + scratchVolume.details: + hostPath: + path: /tmp + type: Directory + documentIndex: 0 + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: anchore-scratch + hostPath: + path: /tmp + type: Directory + count: 1 + any: true + + - it: should render certStore volumeMount and volume + set: + certStoreSecretName: mycerts + templates: *backend_test_templates + documentIndex: 0 + asserts: + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: certs + mountPath: /home/anchore/certs/ + readOnly: true + count: 1 + any: true + - contains: + path: spec.template.spec.volumes + content: + name: 
certs + secret: + secretName: mycerts + count: 1 + any: true + + - it: should render global extraVolumes + set: + extraVolumes: + - name: my-volume + emptyDir: {} + - name: my-volume-two + emptyDir: {} + templates: *deployment_templates + documentIndex: 0 + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: my-volume + emptyDir: {} + count: 1 + any: true + - contains: + path: spec.template.spec.volumes + content: + name: my-volume-two + emptyDir: {} + count: 1 + any: true + - notContains: + path: spec.template.spec.volumes + content: + name: certs + count: 1 + any: true + + - it: should render global volumes anchore-license, and config-volume + templates: *backend_test_templates + documentIndex: 0 + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: anchore-license + secret: + secretName: anchore-enterprise-license + count: 1 + any: true + - contains: + path: spec.template.spec.volumes + content: + name: config-volume + configMap: + name: test-release-enterprise + count: 1 + any: true + + - it: should render enterprise.fullname + templates: *test_templates + documentIndex: 0 + asserts: + - matchRegex: + path: metadata.name + pattern: ^test-release-enterprise + + - it: should render enterprise.fullname with global.fullnameOverride for services + set: + global.fullnameOverride: my-fullname-override + templates: + - api_deployment.yaml + - catalog_deployment.yaml + - notifications_deployment.yaml + - policyengine_deployment.yaml + - reports_deployment.yaml + - reportsworker_deployment.yaml + - simplequeue_deployment.yaml + - ui_deployment.yaml + documentIndex: 1 + asserts: + - matchRegex: + path: spec.selector["app.kubernetes.io/name"] + pattern: ^my-fullname-override + + - it: should render enterprise.fullname with global.nameOverride for services + set: + global.nameOverride: my-name-override + templates: + - api_deployment.yaml + - catalog_deployment.yaml + - notifications_deployment.yaml + - 
policyengine_deployment.yaml + - reports_deployment.yaml + - reportsworker_deployment.yaml + - simplequeue_deployment.yaml + - ui_deployment.yaml + documentIndex: 1 + asserts: + - matchRegex: + path: spec.selector["app.kubernetes.io/name"] + pattern: ^test-release-my-name-override + + - it: should render dbHostname for anchore_secret.yaml + templates: + - anchore_secret.yaml + documentIndex: 0 + asserts: + - isSubset: + path: stringData + content: + ANCHORE_DB_HOST: test-release-postgresql + count: 1 + any: true + + # since postgresql is enabled, should still use the chart postgresql service name + - it: should render dbHostname with externalEndpoint defined and postgresql enabled + set: + postgresql: + externalEndpoint: my-endpoint + enabled: true + cloudsql: + enabled: true + templates: + - anchore_secret.yaml + documentIndex: 0 + asserts: + - isSubset: + path: stringData + content: + ANCHORE_DB_HOST: test-release-postgresql + count: 1 + any: true + - isNotSubset: + path: stringData + content: + ANCHORE_DB_HOST: localhost + + - it: should render dbHostname with externalEndpoint defined and postgresql disabled + set: + postgresql: + externalEndpoint: my-endpoint + enabled: false + templates: + - anchore_secret.yaml + documentIndex: 0 + asserts: + - isSubset: + path: stringData + content: + ANCHORE_DB_HOST: my-endpoint + count: 1 + any: true + + - it: should render dbHostname with cloudsql enabled and postgresql disabled + set: + postgresql: + enabled: false + cloudsql: + enabled: true + templates: + - anchore_secret.yaml + documentIndex: 0 + asserts: + - isSubset: + path: stringData + content: + ANCHORE_DB_HOST: localhost + count: 1 + any: true + + - it: should render default dbHostname with postgresql disabled, postgresql externalEndpoint not defined, and cloudsql disabled + set: + postgresql: + enabled: false + externalEndpoint: "" + cloudsql: + enabled: false + templates: + - anchore_secret.yaml + documentIndex: 0 + asserts: + - isSubset: + path: stringData + 
content: + ANCHORE_DB_HOST: test-release-postgresql + count: 1 + any: true + + - it: should render dbHostname for ui_secret.yaml + templates: + - ui_secret.yaml + documentIndex: 0 + asserts: + - isSubset: + path: stringData + content: + ANCHORE_APPDB_URI: postgresql://anchore:anchore-postgres,123@test-release-postgresql/anchore + count: 1 + any: true + + - it: should render dbHostname with externalEndpoint defined and postgresql enabled for ui_secret.yaml + set: + postgresql: + externalEndpoint: my-endpoint + enabled: true + cloudsql: + enabled: true + templates: + - ui_secret.yaml + documentIndex: 0 + asserts: + - isSubset: + path: stringData + content: + ANCHORE_APPDB_URI: postgresql://anchore:anchore-postgres,123@test-release-postgresql/anchore + count: 1 + any: true + - isNotSubset: + path: stringData + content: + ANCHORE_APPDB_URI: postgresql://anchore:anchore-postgres,123@localhost/anchore + + - it: should render dbHostname with externalEndpoint defined and postgresql disabled for ui_secret.yaml + set: + postgresql: + externalEndpoint: my-endpoint + enabled: false + templates: + - ui_secret.yaml + documentIndex: 0 + asserts: + - isSubset: + path: stringData + content: + ANCHORE_APPDB_URI: postgresql://anchore:anchore-postgres,123@my-endpoint/anchore + count: 1 + any: true + + - it: should render dbHostname with cloudsql enabled and postgresql disabled for ui_secret.yaml + set: + postgresql: + enabled: false + cloudsql: + enabled: true + templates: + - ui_secret.yaml + documentIndex: 0 + asserts: + - isSubset: + path: stringData + content: + ANCHORE_APPDB_URI: postgresql://anchore:anchore-postgres,123@localhost/anchore + count: 1 + any: true + + - it: should render default dbHostname with postgresql disabled, postgresql externalEndpoint not defined, and cloudsql disabled for ui_secret.yaml + set: + postgresql: + enabled: false + externalEndpoint: "" + cloudsql: + enabled: false + templates: + - ui_secret.yaml + documentIndex: 0 + asserts: + - isSubset: + path: 
stringData + content: + ANCHORE_APPDB_URI: postgresql://anchore:anchore-postgres,123@test-release-postgresql/anchore + count: 1 + any: true + + - it: should render default_admin_password for anchore_secret.yaml setting anchoreConfig.default_admin_password + set: + anchoreConfig.default_admin_password: my-password + templates: + - anchore_secret.yaml + documentIndex: 0 + asserts: + - isSubset: + path: stringData + content: + ANCHORE_ADMIN_PASSWORD: my-password + ANCHORECTL_PASSWORD: my-password + count: 1 + any: true + + - it: should render default_admin_password for anchore_secret.yaml + templates: + - anchore_secret.yaml + documentIndex: 0 + asserts: + - matchRegex: + path: stringData["ANCHORE_ADMIN_PASSWORD"] + pattern: ^[a-zA-Z0-9]{32}$ + + - it: should render anchoreFeedsURL with feeds.url set + set: + feeds: + url: my-feeds-url + templates: + - anchore_configmap.yaml + documentIndex: 0 + asserts: + - matchRegex: + path: data["config.yaml"] + pattern: "url: my-feeds-url/v2/databases/grypedb" + + - matchRegex: + path: data["config.yaml"] + pattern: "url: my-feeds-url/v2/feeds" + + - it: should render v2 anchoreFeedsURL with feeds.url set + set: + feeds: + url: my-feeds-url + service: + apiVersion: v2 + templates: + - anchore_configmap.yaml + documentIndex: 0 + asserts: + - matchRegex: + path: data["config.yaml"] + pattern: "url: my-feeds-url/v2/databases/grypedb" + + - matchRegex: + path: data["config.yaml"] + pattern: "url: my-feeds-url/v2/feeds" + + - it: should render anchoreFeedsURL with feeds.chartEnabled + set: + feeds: + chartEnabled: true + templates: + - anchore_configmap.yaml + documentIndex: 0 + asserts: + - matchRegex: + path: data["config.yaml"] + pattern: "url: http://test-release-feeds:8448/v2/databases/grypedb" + + - matchRegex: + path: data["config.yaml"] + pattern: "url: http://test-release-feeds:8448/v2/feeds" + + - it: should render grypeProviderURL with anchoreConfig.policy_engine.overrideFeedsToUpstream + set: + anchoreConfig.policy_engine: 
+ overrideFeedsToUpstream: true + templates: + - anchore_configmap.yaml + documentIndex: 0 + asserts: + - notMatchRegex: + path: data["config.yaml"] + pattern: "url: http://test-release-feeds:8448/v2/databases/grypedb" + + - matchRegex: + path: data["config.yaml"] + pattern: "url: https://toolbox-data.anchore.io/grype/databases/listing.json" + + - it: should render grypeProviderURL without feeds.url defined and feeds.chartEnabled as false + set: + feeds: + url: "" + chartEnabled: false + templates: + - anchore_configmap.yaml + documentIndex: 0 + asserts: + - notMatchRegex: + path: data["config.yaml"] + pattern: "url: http://test-release-feeds:8448/v2/databases/grypedb" + + - matchRegex: + path: data["config.yaml"] + pattern: "url: https://toolbox-data.anchore.io/grype/databases/listing.json" + + - it: should set the correct protocol with feeds.anchoreConfig.internalServicesSSL.enabled to true for anchore_configmap.yaml + set: + feeds.anchoreConfig.internalServicesSSL.enabled: true + templates: + - anchore_configmap.yaml + documentIndex: 0 + asserts: + - matchRegex: + path: data["config.yaml"] + pattern: "url: https://test-release-feeds:8448/v2/databases/grypedb" + + - matchRegex: + path: data["config.yaml"] + pattern: "url: https://test-release-feeds:8448/v2/feeds" + + - notMatchRegex: + path: data["config.yaml"] + pattern: "url: http://test-release-feeds:8448/v2/databases/grypedb" + + - notMatchRegex: + path: data["config.yaml"] + pattern: "url: http://test-release-feeds:8448/v2/feeds" + + - it: should set the correct protocol with anchoreConfig.internalServicesSSL.enabled to true for deployments + set: + anchoreConfig.internalServicesSSL.enabled: true + templates: + - catalog_deployment.yaml + - policyengine_deployment.yaml + - reports_deployment.yaml + - reportsworker_deployment.yaml + - simplequeue_deployment.yaml + documentIndex: 0 + asserts: + - isSubset: + path: spec.template.spec.containers[0].livenessProbe.httpGet + content: + scheme: HTTPS + + - it: 
should set the correct protocol with feeds.anchoreConfig.internalServicesSSL.enabled to false for anchore_configmap.yaml + set: + anchoreConfig.internalServicesSSL.enabled: false + templates: + - anchore_configmap.yaml + documentIndex: 0 + asserts: + - matchRegex: + path: data["config.yaml"] + pattern: "url: http://test-release-feeds:8448/v2/databases/grypedb" + + - matchRegex: + path: data["config.yaml"] + pattern: "url: http://test-release-feeds:8448/v2/feeds" + + - notMatchRegex: + path: data["config.yaml"] + pattern: "url: https://test-release-feeds:8448/v2/databases/grypedb" + + - notMatchRegex: + path: data["config.yaml"] + pattern: "url: https://test-release-feeds:8448/v2/feeds" + + - it: should set the correct protocol with anchoreConfig.internalServicesSSL.enabled to false for deployments + set: + anchoreConfig.internalServicesSSL.enabled: false + templates: + - catalog_deployment.yaml + - policyengine_deployment.yaml + - reports_deployment.yaml + - reportsworker_deployment.yaml + - simplequeue_deployment.yaml + documentIndex: 0 + asserts: + - isSubset: + path: spec.template.spec.containers[0].livenessProbe.httpGet + content: + scheme: HTTP + + - it: should set the correct protocol with anchoreConfig.internalServicesSSL.enabled to false for ui_configmap.yaml + set: + anchoreConfig.internalServicesSSL.enabled: false + templates: + - ui_configmap.yaml + documentIndex: 0 + asserts: + - matchRegex: + path: data["config-ui.yaml"] + pattern: "enterprise_uri: 'http://test-release-enterprise-api:8228/v2'" + - matchRegex: + path: data["config-ui.yaml"] + pattern: "notifications_uri: 'http://test-release-enterprise-api:8228/v2'" + - matchRegex: + path: data["config-ui.yaml"] + pattern: "reports_uri: 'http://test-release-enterprise-api:8228/v2'" + + - it: should set the correct protocol with anchoreConfig.internalServicesSSL.enabled to true for ui_configmap.yaml + set: + anchoreConfig.internalServicesSSL.enabled: true + templates: + - ui_configmap.yaml + 
documentIndex: 0 + asserts: + - matchRegex: + path: data["config-ui.yaml"] + pattern: "enterprise_uri: 'https://test-release-enterprise-api:8228/v2'" + - matchRegex: + path: data["config-ui.yaml"] + pattern: "notifications_uri: 'https://test-release-enterprise-api:8228/v2'" + - matchRegex: + path: data["config-ui.yaml"] + pattern: "reports_uri: 'https://test-release-enterprise-api:8228/v2'" + + - it: should render nodePort when set + set: + api.service.nodePort: 9999 + catalog.service.nodePort: 9999 + notifications.service.nodePort: 9999 + policyEngine.service.nodePort: 9999 + reports.service.nodePort: 9999 + reportsWorker.service.nodePort: 9999 + simpleQueue.service.nodePort: 9999 + ui.service.nodePort: 9999 + templates: + - api_deployment.yaml + - catalog_deployment.yaml + - notifications_deployment.yaml + - policyengine_deployment.yaml + - reports_deployment.yaml + - reportsworker_deployment.yaml + - simplequeue_deployment.yaml + - ui_deployment.yaml + documentIndex: 1 + asserts: + - isSubset: + path: spec.ports[0] + content: + protocol: TCP + nodePort: 9999 + count: 1 + + - it: should not render nodePort by default + templates: + - api_deployment.yaml + - catalog_deployment.yaml + - notifications_deployment.yaml + - policyengine_deployment.yaml + - reports_deployment.yaml + - reportsworker_deployment.yaml + - simplequeue_deployment.yaml + - ui_deployment.yaml + documentIndex: 1 + asserts: + - isNotSubset: + path: spec.ports[0] + content: + nodePort: 9999 + count: 1 diff --git a/stable/enterprise/tests/configmap_test.yaml b/stable/enterprise/tests/configmap_test.yaml new file mode 100644 index 00000000..3433c48e --- /dev/null +++ b/stable/enterprise/tests/configmap_test.yaml @@ -0,0 +1,137 @@ +suite: ConfigMap Tests +templates: + - templates/analyzer_configmap.yaml + - templates/anchore_configmap.yaml + - templates/envvars_configmap.yaml + - templates/policybundle_configmap.yaml + - templates/scripts_configmap.yaml + - templates/ui_configmap.yaml +values: + - 
values.yaml +release: + name: test-release + namespace: test-namespace +chart: + version: 9.9.9 + appVersion: 9.9.9 +tests: + - it: should render the configmaps + asserts: + - matchSnapshot: {} + + - it: should render a custom config when configOverride is set + template: templates/anchore_configmap.yaml + set: + configOverride: | + service_dir: /foobar + tmp_dir: /test + + log_level: DEBUG + asserts: + - matchRegex: + path: data["config.yaml"] + pattern: | + # Anchore Service Configuration File, mounted from a configmap + # + service_dir: /foobar + tmp_dir: /test + + log_level: DEBUG + + - it: should set the msrc and github drivers if set + template: templates/envvars_configmap.yaml + set: + feeds.anchoreConfig.feeds.drivers.github.enabled: true + feeds.anchoreConfig.feeds.drivers.msrc.enabled: true + asserts: + - equal: + path: data["ANCHORE_FEEDS_DRIVER_MSRC_ENABLED"] + value: "true" + - equal: + path: data["ANCHORE_FEEDS_DRIVER_GITHUB_ENABLED"] + value: "true" + + - it: should not throw a templating error if feeds.something is set but drivers are not + template: templates/envvars_configmap.yaml + set: + feeds.chartEnabled: false + asserts: + - equal: + path: data["ANCHORE_FEEDS_DRIVER_MSRC_ENABLED"] + value: "false" + - equal: + path: data["ANCHORE_FEEDS_DRIVER_GITHUB_ENABLED"] + value: "false" + + - it: should set the msrc and github drivers if set differently + template: templates/envvars_configmap.yaml + set: + feeds.anchoreConfig.feeds.drivers.github.enabled: false + feeds.anchoreConfig.feeds.drivers.msrc.enabled: true + asserts: + - equal: + path: data["ANCHORE_FEEDS_DRIVER_MSRC_ENABLED"] + value: "true" + - equal: + path: data["ANCHORE_FEEDS_DRIVER_GITHUB_ENABLED"] + value: "false" + + - it: should not throw a templating error if feeds.anchoreConfig.something is set but drivers are not + template: templates/envvars_configmap.yaml + set: + feeds.anchoreConfig.log_level: ERROR + asserts: + - equal: + path: data["ANCHORE_FEEDS_DRIVER_MSRC_ENABLED"] + value: 
"false" + - equal: + path: data["ANCHORE_FEEDS_DRIVER_GITHUB_ENABLED"] + value: "false" + + - it: should not throw a templating error if feeds.anchoreConfig.feeds.something is set but drivers are not + template: templates/envvars_configmap.yaml + set: + feeds.anchoreConfig.feeds.cycle_timers.driver_sync: 9001 + asserts: + - equal: + path: data["ANCHORE_FEEDS_DRIVER_MSRC_ENABLED"] + value: "false" + - equal: + path: data["ANCHORE_FEEDS_DRIVER_GITHUB_ENABLED"] + value: "false" + + - it: should not throw a templating error if feeds.anchoreConfig.feeds.drivers.something is set but github and msrc drivers are not + template: templates/envvars_configmap.yaml + set: + feeds.anchoreConfig.feeds.drivers.npm.enabled: true + asserts: + - equal: + path: data["ANCHORE_FEEDS_DRIVER_MSRC_ENABLED"] + value: "false" + - equal: + path: data["ANCHORE_FEEDS_DRIVER_GITHUB_ENABLED"] + value: "false" + + - it: should set the msrc and github drivers correctly if only one is set + template: templates/envvars_configmap.yaml + set: + feeds.anchoreConfig.feeds.drivers.github.enabled: true + asserts: + - equal: + path: data["ANCHORE_FEEDS_DRIVER_MSRC_ENABLED"] + value: "false" + - equal: + path: data["ANCHORE_FEEDS_DRIVER_GITHUB_ENABLED"] + value: "true" + + - it: should set the msrc and github drivers correctly if only the other is set + template: templates/envvars_configmap.yaml + set: + feeds.anchoreConfig.feeds.drivers.msrc.enabled: true + asserts: + - equal: + path: data["ANCHORE_FEEDS_DRIVER_MSRC_ENABLED"] + value: "true" + - equal: + path: data["ANCHORE_FEEDS_DRIVER_GITHUB_ENABLED"] + value: "false" \ No newline at end of file diff --git a/stable/enterprise/tests/ingress_resources_test.yaml b/stable/enterprise/tests/ingress_resources_test.yaml new file mode 100644 index 00000000..11e157f2 --- /dev/null +++ b/stable/enterprise/tests/ingress_resources_test.yaml @@ -0,0 +1,30 @@ +suite: Ingress Resources Tests +templates: + - ingress.yaml +release: + name: test-release + namespace: 
test-namespace +chart: + version: 9.9.9 + appVersion: 9.9.9 + +tests: + - it: should render classname if annotation is not set + set: + ingress.enabled: true + template: ingress.yaml + asserts: + - equal: + path: spec.ingressClassName + value: nginx + + - it: should not render classname if annotation is set + set: + ingress.enabled: true + ingress: + annotations: + "kubernetes.io/ingress.class": "nginx" + template: ingress.yaml + asserts: + - notExists: + path: spec.ingressClassName diff --git a/stable/enterprise/tests/notifications_resources_test.yaml b/stable/enterprise/tests/notifications_resources_test.yaml new file mode 100644 index 00000000..9dcc13c5 --- /dev/null +++ b/stable/enterprise/tests/notifications_resources_test.yaml @@ -0,0 +1,377 @@ +suite: Notifications Resources Tests +templates: + - notifications_deployment.yaml + - anchore_secret.yaml + - anchore_configmap.yaml +release: + name: test-release + namespace: test-namespace +chart: + version: 9.9.9 + appVersion: 9.9.9 + +tests: + - it: should set the correct resource names + template: notifications_deployment.yaml + asserts: + - equal: + path: metadata.name + value: test-release-enterprise-notifications + + - it: should render component labels + template: notifications_deployment.yaml + set: + notifications.labels: + notifications: test + test: foobar + asserts: + - isSubset: + path: metadata.labels + content: + notifications: test + test: foobar + - isSubset: + path: spec.template.metadata.labels + content: + notifications: test + test: foobar + template: notifications_deployment.yaml + documentIndex: 0 + + - it: should render component annotations + template: notifications_deployment.yaml + set: + notifications.annotations: + notifications: test + test: foobar + asserts: + - isSubset: + path: metadata.annotations + content: + notifications: test + test: foobar + - isSubset: + path: spec.template.metadata.annotations + content: + notifications: test + test: foobar + template: 
notifications_deployment.yaml + documentIndex: 0 + + - it: should render component matchLabels + template: notifications_deployment.yaml + documentIndex: 0 + asserts: + - isSubset: + path: spec.selector.matchLabels + content: + app.kubernetes.io/name: test-release-enterprise + app.kubernetes.io/component: notifications + + - it: should render component replica count + template: notifications_deployment.yaml + documentIndex: 0 + set: + notifications.replicaCount: 2 + asserts: + - equal: + path: spec.replicas + value: 2 + + - it: should render component serviceAccountName + template: notifications_deployment.yaml + documentIndex: 0 + set: + notifications.serviceAccountName: notifications-test + asserts: + - equal: + path: spec.template.spec.serviceAccountName + value: notifications-test + + - it: should render component serviceAccountName even when global serviceAccountName is set + template: notifications_deployment.yaml + documentIndex: 0 + set: + serviceAccountName: global-test + notifications.serviceAccountName: notifications-test + asserts: + - equal: + path: spec.template.spec.serviceAccountName + value: notifications-test + + - it: should render component nodeSelector + template: notifications_deployment.yaml + documentIndex: 0 + set: + notifications.nodeSelector: + notifications: test + asserts: + - isSubset: + path: spec.template.spec.nodeSelector + content: + notifications: test + + - it: should render component affinity + template: notifications_deployment.yaml + documentIndex: 0 + set: + notifications.affinity: + notifications: test + asserts: + - isSubset: + path: spec.template.spec.affinity + content: + notifications: test + + - it: should render component tolerations + template: notifications_deployment.yaml + documentIndex: 0 + set: + notifications.tolerations: + - key: "notifications" + operator: "Equal" + value: "test" + effect: "NoSchedule" + asserts: + - contains: + path: spec.template.spec.tolerations + content: + key: "notifications" + operator: 
"Equal" + value: "test" + effect: "NoSchedule" + count: 1 + + - it: should render component container name + template: notifications_deployment.yaml + documentIndex: 0 + asserts: + - equal: + path: spec.template.spec.containers[0].name + value: enterprise-notifications + + - it: should render component entrypoint args + template: notifications_deployment.yaml + documentIndex: 0 + asserts: + - matchRegex: + path: spec.template.spec.containers[0].args[0] + pattern: ^/docker-entrypoint\.sh anchore-enterprise-manager service start --no-auto-upgrade notifications$ + count: 1 + + - it: should render notifications component environment variables + template: notifications_deployment.yaml + documentIndex: 0 + set: + notifications.extraEnv: + - name: notifications + value: test + - name: test + value: foobar + asserts: + - contains: + path: spec.template.spec.containers[0].env + content: + name: ANCHORE_ENDPOINT_HOSTNAME + value: test-release-enterprise-notifications + count: 1 + - contains: + path: spec.template.spec.containers[0].env + content: + name: ANCHORE_PORT + value: "8668" + count: 1 + - contains: + path: spec.template.spec.containers[0].env + content: + name: notifications + value: test + count: 1 + - contains: + path: spec.template.spec.containers[0].env + content: + name: test + value: foobar + + - it: should render component extraVolumes + template: notifications_deployment.yaml + documentIndex: 0 + set: + notifications.extraVolumes: + - name: extra-vol + emptyDir: {} + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: extra-vol + emptyDir: {} + count: 1 + + - it: should render component extraVolumeMounts + template: notifications_deployment.yaml + documentIndex: 0 + set: + notifications.extraVolumeMounts: + - name: extra-vol + mountPath: /mnt/extra-vol + readOnly: false + asserts: + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: extra-vol + mountPath: /mnt/extra-vol + readOnly: false + count: 1 + 
+ - it: should render component and global extraVolumes + template: notifications_deployment.yaml + documentIndex: 0 + set: + notifications.extraVolumes: + - name: extra-vol + emptyDir: {} + extraVolumes: + - name: global-extra-vol + emptyDir: {} + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: extra-vol + emptyDir: {} + count: 1 + - contains: + path: spec.template.spec.volumes + content: + name: global-extra-vol + emptyDir: {} + count: 1 + + - it: should render component and global extraVolumeMounts + template: notifications_deployment.yaml + documentIndex: 0 + set: + notifications.extraVolumeMounts: + - name: extra-vol + mountPath: /mnt/extra-vol + readOnly: false + extraVolumeMounts: + - name: global-extra-vol + mountPath: /mnt/global-extra-vol + readOnly: false + asserts: + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: extra-vol + mountPath: /mnt/extra-vol + readOnly: false + count: 1 + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: global-extra-vol + mountPath: /mnt/global-extra-vol + readOnly: false + count: 1 + + - it: should render component ports + template: notifications_deployment.yaml + documentIndex: 0 + asserts: + - contains: + path: spec.template.spec.containers[0].ports + content: + name: notifications + containerPort: 8668 + count: 1 + + - it: should render component probes + template: notifications_deployment.yaml + documentIndex: 0 + asserts: + - isSubset: + path: spec.template.spec.containers[0].livenessProbe + content: + httpGet: + path: /health + port: notifications + scheme: HTTP + initialDelaySeconds: 120 + timeoutSeconds: 10 + periodSeconds: 10 + failureThreshold: 6 + successThreshold: 1 + count: 1 + - isSubset: + path: spec.template.spec.containers[0].readinessProbe + content: + httpGet: + path: /health + port: notifications + scheme: HTTP + timeoutSeconds: 10 + periodSeconds: 10 + failureThreshold: 3 + successThreshold: 1 + count: 1 
+ + - it: should render component resource requests & limits + template: notifications_deployment.yaml + documentIndex: 0 + set: + notifications.resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 200m + memory: 256Mi + asserts: + - isSubset: + path: spec.template.spec.containers[0].resources.requests + content: + cpu: 100m + memory: 128Mi + count: 1 + - isSubset: + path: spec.template.spec.containers[0].resources.limits + content: + cpu: 200m + memory: 256Mi + count: 1 + + - it: should render component service type + template: notifications_deployment.yaml + documentIndex: 1 + asserts: + - equal: + path: spec.type + value: ClusterIP + count: 1 + + - it: should render component service ports + template: notifications_deployment.yaml + documentIndex: 1 + asserts: + - contains: + path: spec.ports + content: + name: notifications + port: 8668 + targetPort: 8668 + protocol: TCP + count: 1 + + - it: should render component service selectors + template: notifications_deployment.yaml + documentIndex: 1 + asserts: + - isSubset: + path: spec.selector + content: + app.kubernetes.io/name: test-release-enterprise + app.kubernetes.io/component: notifications + count: 1 diff --git a/stable/enterprise/tests/policyengine_resources_test.yaml b/stable/enterprise/tests/policyengine_resources_test.yaml new file mode 100644 index 00000000..6a2f256f --- /dev/null +++ b/stable/enterprise/tests/policyengine_resources_test.yaml @@ -0,0 +1,431 @@ +suite: Policy Engine Resources Tests +templates: + - policyengine_deployment.yaml + - anchore_secret.yaml + - anchore_configmap.yaml +release: + name: test-release + namespace: test-namespace +chart: + version: 9.9.9 + appVersion: 9.9.9 + +tests: + - it: should set the correct resource names + template: policyengine_deployment.yaml + asserts: + - equal: + path: metadata.name + value: test-release-enterprise-policy + + - it: should render component labels + template: policyengine_deployment.yaml + set: + policyEngine.labels: + 
policyEngine: test + test: foobar + asserts: + - isSubset: + path: metadata.labels + content: + policyEngine: test + test: foobar + - isSubset: + path: spec.template.metadata.labels + content: + policyEngine: test + test: foobar + template: policyengine_deployment.yaml + documentIndex: 0 + + - it: should render component annotations + template: policyengine_deployment.yaml + set: + policyEngine.annotations: + policyEngine: test + test: foobar + asserts: + - isSubset: + path: metadata.annotations + content: + policyEngine: test + test: foobar + - isSubset: + path: spec.template.metadata.annotations + content: + policyEngine: test + test: foobar + template: policyengine_deployment.yaml + documentIndex: 0 + + - it: should render component matchLabels + template: policyengine_deployment.yaml + documentIndex: 0 + asserts: + - isSubset: + path: spec.selector.matchLabels + content: + app.kubernetes.io/name: test-release-enterprise + app.kubernetes.io/component: policyengine + + - it: should render component replica count + template: policyengine_deployment.yaml + documentIndex: 0 + set: + policyEngine.replicaCount: 2 + asserts: + - equal: + path: spec.replicas + value: 2 + + - it: should render component serviceAccountName + template: policyengine_deployment.yaml + documentIndex: 0 + set: + policyEngine.serviceAccountName: policyEngine-test + asserts: + - equal: + path: spec.template.spec.serviceAccountName + value: policyEngine-test + + - it: should render component serviceAccountName even when global serviceAccountName is set + template: policyengine_deployment.yaml + documentIndex: 0 + set: + serviceAccountName: global-test + policyEngine.serviceAccountName: policyEngine-test + asserts: + - equal: + path: spec.template.spec.serviceAccountName + value: policyEngine-test + + - it: should render component nodeSelector + template: policyengine_deployment.yaml + documentIndex: 0 + set: + policyEngine.nodeSelector: + policyEngine: test + asserts: + - isSubset: + path: 
spec.template.spec.nodeSelector + content: + policyEngine: test + + - it: should render component affinity + template: policyengine_deployment.yaml + documentIndex: 0 + set: + policyEngine.affinity: + policyEngine: test + asserts: + - isSubset: + path: spec.template.spec.affinity + content: + policyEngine: test + + - it: should render component tolerations + template: policyengine_deployment.yaml + documentIndex: 0 + set: + policyEngine.tolerations: + - key: "policyEngine" + operator: "Equal" + value: "test" + effect: "NoSchedule" + asserts: + - contains: + path: spec.template.spec.tolerations + content: + key: "policyEngine" + operator: "Equal" + value: "test" + effect: "NoSchedule" + count: 1 + + - it: should render scratch volume details from policyEngine values + template: policyengine_deployment.yaml + documentIndex: 0 + set: + policyEngine.scratchVolume.details: + csi: + driver: inline.storage.kubernetes.io + volumeAttributes: + foo: bar + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: anchore-scratch + csi: + driver: inline.storage.kubernetes.io + volumeAttributes: + foo: bar + count: 1 + + - it: should render scratch volume using details from policyEngine values, even when global scratchVolume is explicitly set + template: policyengine_deployment.yaml + documentIndex: 0 + set: + policyEngine.scratchVolume.details: + hostPath: + path: /tmp + type: Directory + scratchVolume.details: + emptyDir: + medium: Memory + memoryLimit: 128Mi + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: anchore-scratch + hostPath: + path: /tmp + type: Directory + count: 1 + + - it: should render component container name + template: policyengine_deployment.yaml + documentIndex: 0 + asserts: + - equal: + path: spec.template.spec.containers[0].name + value: enterprise-policyengine + + - it: should render component entrypoint args + template: policyengine_deployment.yaml + documentIndex: 0 + asserts: + - matchRegex: + path: 
spec.template.spec.containers[0].args[0] + pattern: ^/docker-entrypoint\.sh anchore-enterprise-manager service start --no-auto-upgrade policy_engine$ + count: 1 + + - it: should render policyEngine component environment variables + template: policyengine_deployment.yaml + documentIndex: 0 + set: + policyEngine.extraEnv: + - name: policyEngine + value: test + - name: test + value: foobar + asserts: + - contains: + path: spec.template.spec.containers[0].env + content: + name: ANCHORE_ENDPOINT_HOSTNAME + value: test-release-enterprise-policy + count: 1 + - contains: + path: spec.template.spec.containers[0].env + content: + name: ANCHORE_PORT + value: "8087" + count: 1 + - contains: + path: spec.template.spec.containers[0].env + content: + name: policyEngine + value: test + count: 1 + - contains: + path: spec.template.spec.containers[0].env + content: + name: test + value: foobar + + - it: should render component ports + template: policyengine_deployment.yaml + documentIndex: 0 + asserts: + - contains: + path: spec.template.spec.containers[0].ports + content: + name: policyengine + containerPort: 8087 + count: 1 + + - it: should render component volumeMounts + template: policyengine_deployment.yaml + documentIndex: 0 + asserts: + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: anchore-scratch + mountPath: /analysis_scratch + count: 1 + any: true + + - it: should render component extraVolumes + template: policyengine_deployment.yaml + documentIndex: 0 + set: + policyEngine.extraVolumes: + - name: extra-vol + emptyDir: {} + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: extra-vol + emptyDir: {} + count: 1 + + - it: should render component extraVolumeMounts + template: policyengine_deployment.yaml + documentIndex: 0 + set: + policyEngine.extraVolumeMounts: + - name: extra-vol + mountPath: /mnt/extra-vol + readOnly: false + asserts: + - contains: + path: spec.template.spec.containers[0].volumeMounts + 
content: + name: extra-vol + mountPath: /mnt/extra-vol + readOnly: false + count: 1 + + - it: should render component and global extraVolumes + template: policyengine_deployment.yaml + documentIndex: 0 + set: + policyEngine.extraVolumes: + - name: extra-vol + emptyDir: {} + extraVolumes: + - name: global-extra-vol + emptyDir: {} + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: extra-vol + emptyDir: {} + count: 1 + - contains: + path: spec.template.spec.volumes + content: + name: global-extra-vol + emptyDir: {} + count: 1 + + - it: should render component and global extraVolumeMounts + template: policyengine_deployment.yaml + documentIndex: 0 + set: + policyEngine.extraVolumeMounts: + - name: extra-vol + mountPath: /mnt/extra-vol + readOnly: false + extraVolumeMounts: + - name: global-extra-vol + mountPath: /mnt/global-extra-vol + readOnly: false + asserts: + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: extra-vol + mountPath: /mnt/extra-vol + readOnly: false + count: 1 + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: global-extra-vol + mountPath: /mnt/global-extra-vol + readOnly: false + count: 1 + + - it: should render component probes + template: policyengine_deployment.yaml + documentIndex: 0 + asserts: + - isSubset: + path: spec.template.spec.containers[0].livenessProbe + content: + httpGet: + path: /health + port: policyengine + scheme: HTTP + initialDelaySeconds: 120 + timeoutSeconds: 10 + periodSeconds: 10 + failureThreshold: 6 + successThreshold: 1 + count: 1 + - isSubset: + path: spec.template.spec.containers[0].readinessProbe + content: + httpGet: + path: /health + port: policyengine + scheme: HTTP + timeoutSeconds: 10 + periodSeconds: 10 + failureThreshold: 3 + successThreshold: 1 + count: 1 + + - it: should render component resource requests & limits + template: policyengine_deployment.yaml + documentIndex: 0 + set: + policyEngine.resources: + 
requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 200m + memory: 256Mi + asserts: + - isSubset: + path: spec.template.spec.containers[0].resources.requests + content: + cpu: 100m + memory: 128Mi + count: 1 + - isSubset: + path: spec.template.spec.containers[0].resources.limits + content: + cpu: 200m + memory: 256Mi + count: 1 + + - it: should render component service type + template: policyengine_deployment.yaml + documentIndex: 1 + asserts: + - equal: + path: spec.type + value: ClusterIP + count: 1 + + - it: should render component service ports + template: policyengine_deployment.yaml + documentIndex: 1 + asserts: + - contains: + path: spec.ports + content: + name: policyengine + port: 8087 + targetPort: 8087 + protocol: TCP + count: 1 + + - it: should render component service selectors + template: policyengine_deployment.yaml + documentIndex: 1 + asserts: + - isSubset: + path: spec.selector + content: + app.kubernetes.io/name: test-release-enterprise + app.kubernetes.io/component: policyengine + count: 1 diff --git a/stable/enterprise/tests/posthook_upgrade_resources_test.yaml b/stable/enterprise/tests/posthook_upgrade_resources_test.yaml new file mode 100644 index 00000000..36997aa8 --- /dev/null +++ b/stable/enterprise/tests/posthook_upgrade_resources_test.yaml @@ -0,0 +1,314 @@ +suite: Posthook Upgrade Job Tests +templates: + - hooks/post-upgrade/upgrade_job.yaml + - anchore_secret.yaml +values: + - values.yaml +set: + upgradeJob.enabled: true + upgradeJob.usePostUpgradeHook: true +release: + name: test-release + namespace: test-namespace +chart: + version: 9.9.9 + appVersion: 9.9.9 +tests: + - it: post-upgrade hook job gets created when upgradeJob.usePostUpgradeHook is true + template: hooks/post-upgrade/upgrade_job.yaml + asserts: + - containsDocument: + kind: Job + apiVersion: batch/v1 + name: test-release-enterprise-999-upgrade + namespace: test-namespace + + - it: post-upgrade hook job does not get created when upgradeJob.usePostUpgradeHook is false 
+ template: hooks/post-upgrade/upgrade_job.yaml + set: + upgradeJob: + usePostUpgradeHook: false + asserts: + - hasDocuments: + count: 0 + + - it: post-upgrade hook job does not get created when upgradeJob.enabled is false + template: hooks/post-upgrade/upgrade_job.yaml + set: + upgradeJob: + enabled: false + asserts: + - hasDocuments: + count: 0 + + - it: should render component labels + template: hooks/post-upgrade/upgrade_job.yaml + set: + upgradeJob.labels: + upgradeJob: test + test: foobar + asserts: + - isSubset: + path: metadata.labels + content: + upgradeJob: test + test: foobar + + - it: should render component annotations + template: hooks/post-upgrade/upgrade_job.yaml + set: + upgradeJob.annotations: + upgradeJob: test + test: foobar + asserts: + - isSubset: + path: metadata.annotations + content: + upgradeJob: test + test: foobar + + - it: should render helm hook annotations + template: hooks/post-upgrade/upgrade_job.yaml + asserts: + - isSubset: + path: metadata.annotations + content: + "helm.sh/hook": post-upgrade + "helm.sh/hook-weight": "0" + + - it: should not render helm hook annotations when upgradeJob.force is true + template: hooks/post-upgrade/upgrade_job.yaml + set: + upgradeJob.force: true + asserts: + - isNotSubset: + path: metadata.annotations + content: + "helm.sh/hook": post-upgrade + "helm.sh/hook-weight": "0" + + - it: should render file checksum/secret annotation + template: hooks/post-upgrade/upgrade_job.yaml + asserts: + - exists: + path: spec.template.metadata.annotations['checksum/secrets'] + + - it: should render component serviceAccountName + template: hooks/post-upgrade/upgrade_job.yaml + set: + upgradeJob.serviceAccountName: upgradeJob-test + asserts: + - equal: + path: spec.template.spec.serviceAccountName + value: upgradeJob-test + + - it: should render component serviceAccountName even when global serviceAccountName is set + template: hooks/post-upgrade/upgrade_job.yaml + set: + serviceAccountName: global-test + 
upgradeJob.serviceAccountName: upgradeJob-test + asserts: + - equal: + path: spec.template.spec.serviceAccountName + value: upgradeJob-test + + - it: should render component nodeSelector + template: hooks/post-upgrade/upgrade_job.yaml + set: + upgradeJob.nodeSelector: + upgradeJob: test + asserts: + - isSubset: + path: spec.template.spec.nodeSelector + content: + upgradeJob: test + + - it: should render component affinity + template: hooks/post-upgrade/upgrade_job.yaml + set: + upgradeJob.affinity: + upgradeJob: test + asserts: + - isSubset: + path: spec.template.spec.affinity + content: + upgradeJob: test + + - it: should render component tolerations + template: hooks/post-upgrade/upgrade_job.yaml + set: + upgradeJob.tolerations: + - key: "upgradeJob" + operator: "Equal" + value: "test" + effect: "NoSchedule" + asserts: + - contains: + path: spec.template.spec.tolerations + content: + key: "upgradeJob" + operator: "Equal" + value: "test" + effect: "NoSchedule" + count: 1 + + - it: should render proper database endpoint on entrypoint + template: hooks/post-upgrade/upgrade_job.yaml + asserts: + - matchRegex: + path: spec.template.spec.containers[0].args[0] + pattern: anchore-enterprise-manager db --db-connect postgresql:\/\/\"\$\{ANCHORE_DB_USER\}\":\"\$\{ANCHORE_DB_PASSWORD\}\"@\"\$\{ANCHORE_DB_HOST\}\":\"\$\{ANCHORE_DB_PORT\}\"\/\"\$\{ANCHORE_DB_NAME\}\" upgrade --dontask; + count: 1 + + - it: should render proper database endpoint on entrypoint when ssl is enabled and mode is require + template: hooks/post-upgrade/upgrade_job.yaml + set: + anchoreConfig.database.ssl: true + anchoreConfig.database.sslMode: require + asserts: + - matchRegex: + path: spec.template.spec.containers[0].args[0] + pattern: anchore-enterprise-manager db --db-use-ssl --db-connect postgresql:\/\/\"\$\{ANCHORE_DB_USER\}\":\"\$\{ANCHORE_DB_PASSWORD\}\"@\"\$\{ANCHORE_DB_HOST\}\":\"\$\{ANCHORE_DB_PORT\}\"\/\"\$\{ANCHORE_DB_NAME\}\"\?sslmode\=require upgrade --dontask; + count: 1 + + - it: 
should render proper database endpoint on entrypoint when ssl is enabled and mode is verify-full + template: hooks/post-upgrade/upgrade_job.yaml + set: + anchoreConfig.database.ssl: true + anchoreConfig.database.sslMode: verify-full + anchoreConfig.database.sslRootCertFileName: test-root.crt + asserts: + - matchRegex: + path: spec.template.spec.containers[0].args[0] + pattern: anchore-enterprise-manager db --db-use-ssl --db-connect postgresql:\/\/\"\$\{ANCHORE_DB_USER\}\":\"\$\{ANCHORE_DB_PASSWORD\}\"@\"\$\{ANCHORE_DB_HOST\}\":\"\$\{ANCHORE_DB_PORT\}\"\/\"\$\{ANCHORE_DB_NAME\}\"\?sslmode\=verify-full\\&sslrootcert\=\/home\/anchore\/certs\/test-root\.crt upgrade --dontask; + count: 1 + + - it: should render upgradeJob component environment variables + template: hooks/post-upgrade/upgrade_job.yaml + set: + upgradeJob.extraEnv: + - name: upgradeJob + value: test + - name: test + value: foobar + asserts: + - contains: + path: spec.template.spec.containers[0].env + content: + name: upgradeJob + value: test + count: 1 + - contains: + path: spec.template.spec.containers[0].env + content: + name: test + value: foobar + + - it: should render component resource requests & limits + template: hooks/post-upgrade/upgrade_job.yaml + documentIndex: 0 + set: + upgradeJob.resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 200m + memory: 256Mi + asserts: + - isSubset: + path: spec.template.spec.containers[0].resources.requests + content: + cpu: 100m + memory: 128Mi + count: 1 + - isSubset: + path: spec.template.spec.containers[0].resources.limits + content: + cpu: 200m + memory: 256Mi + count: 1 + + - it: should render component extraVolumes + template: hooks/post-upgrade/upgrade_job.yaml + documentIndex: 0 + set: + upgradeJob.extraVolumes: + - name: extra-vol + emptyDir: {} + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: extra-vol + emptyDir: {} + count: 1 + + - it: should render component extraVolumeMounts + template: 
hooks/post-upgrade/upgrade_job.yaml + documentIndex: 0 + set: + upgradeJob.extraVolumeMounts: + - name: extra-vol + mountPath: /mnt/extra-vol + readOnly: false + asserts: + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: extra-vol + mountPath: /mnt/extra-vol + readOnly: false + count: 1 + + - it: should render component and global extraVolumes + template: hooks/post-upgrade/upgrade_job.yaml + documentIndex: 0 + set: + upgradeJob.extraVolumes: + - name: extra-vol + emptyDir: {} + extraVolumes: + - name: global-extra-vol + emptyDir: {} + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: extra-vol + emptyDir: {} + count: 1 + - contains: + path: spec.template.spec.volumes + content: + name: global-extra-vol + emptyDir: {} + count: 1 + + - it: should render component and global extraVolumeMounts + template: hooks/post-upgrade/upgrade_job.yaml + documentIndex: 0 + set: + upgradeJob.extraVolumeMounts: + - name: extra-vol + mountPath: /mnt/extra-vol + readOnly: false + extraVolumeMounts: + - name: global-extra-vol + mountPath: /mnt/global-extra-vol + readOnly: false + asserts: + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: extra-vol + mountPath: /mnt/extra-vol + readOnly: false + count: 1 + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: global-extra-vol + mountPath: /mnt/global-extra-vol + readOnly: false + count: 1 diff --git a/stable/enterprise/tests/prehook_upgrade_resources_test.yaml b/stable/enterprise/tests/prehook_upgrade_resources_test.yaml new file mode 100644 index 00000000..35585f40 --- /dev/null +++ b/stable/enterprise/tests/prehook_upgrade_resources_test.yaml @@ -0,0 +1,390 @@ +suite: PreHook Upgrade Job Tests +templates: + - templates/hooks/pre-upgrade/upgrade_job.yaml + - templates/hooks/pre-upgrade/upgrade_rbac.yaml + - anchore_secret.yaml +values: + - values.yaml +release: + name: test-release + namespace: 
test-namespace +chart: + version: 9.9.9 + appVersion: 9.9.9 + +upgrade-resource: &upgrade-resources + - templates/hooks/pre-upgrade/upgrade_job.yaml + - templates/hooks/pre-upgrade/upgrade_rbac.yaml + +tests: + - it: rbac should match snapshot + template: templates/hooks/pre-upgrade/upgrade_rbac.yaml + asserts: + - matchSnapshot: {} + + - it: should render helm hook annotations on rbac + template: templates/hooks/pre-upgrade/upgrade_rbac.yaml + asserts: + - isSubset: + path: metadata.annotations + content: + "helm.sh/hook": pre-upgrade + "helm.sh/hook-weight": "0" + + - it: should not render helm hook annotations on rbac when upgradeJob.force is true + template: templates/hooks/pre-upgrade/upgrade_rbac.yaml + set: + upgradeJob.force: true + asserts: + - isNotSubset: + path: metadata.annotations + content: + "helm.sh/hook": pre-upgrade + "helm.sh/hook-weight": "0" + + - it: pre-hook rbac gets created by default + template: templates/hooks/pre-upgrade/upgrade_rbac.yaml + asserts: + - containsDocument: + kind: ServiceAccount + apiVersion: v1 + name: test-release-enterprise-upgrade-sa + namespace: test-namespace + documentIndex: 0 + - containsDocument: + kind: RoleBinding + apiVersion: rbac.authorization.k8s.io/v1 + name: test-release-enterprise-upgrade-role-binding + namespace: test-namespace + documentIndex: 1 + - containsDocument: + kind: Role + apiVersion: rbac.authorization.k8s.io/v1 + name: test-release-enterprise-upgrade-role + namespace: test-namespace + documentIndex: 2 + + - it: pre-hook job does not get created when pre-upgrade hook is enabled + templates: *upgrade-resources + set: + upgradeJob: + usePostUpgradeHook: true + asserts: + - hasDocuments: + count: 0 + + - it: pre-hook job does not get created when upgradeJob.enabled is false + templates: *upgrade-resources + set: + upgradeJob: + enabled: false + asserts: + - hasDocuments: + count: 0 + + - it: should render component labels + templates: *upgrade-resources + set: + upgradeJob.labels: + upgradeJob: 
test + test: foobar + asserts: + - isSubset: + path: metadata.labels + content: + upgradeJob: test + test: foobar + + - it: should render component annotations + templates: *upgrade-resources + set: + upgradeJob.annotations: + upgradeJob: test + test: foobar + asserts: + - isSubset: + path: metadata.annotations + content: + upgradeJob: test + test: foobar + + - it: pre-hook job gets created by default + template: templates/hooks/pre-upgrade/upgrade_job.yaml + asserts: + - containsDocument: + kind: Job + apiVersion: batch/v1 + name: test-release-enterprise-999-upgrade + namespace: test-namespace + + - it: should render helm hook annotations + template: templates/hooks/pre-upgrade/upgrade_job.yaml + asserts: + - isSubset: + path: metadata.annotations + content: + "helm.sh/hook": pre-upgrade + "helm.sh/hook-weight": "3" + + - it: should not render helm hook annotations when upgradeJob.force is true + template: templates/hooks/pre-upgrade/upgrade_job.yaml + set: + upgradeJob.force: true + asserts: + - isNotSubset: + path: metadata.annotations + content: + "helm.sh/hook": pre-upgrade + "helm.sh/hook-weight": "3" + + - it: should render file checksum/secret annotation + template: templates/hooks/pre-upgrade/upgrade_job.yaml + asserts: + - exists: + path: spec.template.metadata.annotations['checksum/secrets'] + + - it: should render component serviceAccountName + template: templates/hooks/pre-upgrade/upgrade_job.yaml + set: + upgradeJob.serviceAccountName: upgradeJob-test + asserts: + - equal: + path: spec.template.spec.serviceAccountName + value: upgradeJob-test + + - it: should render service account name when global serviceAccountName is set and upgradeJob.rbacCreate is true + template: templates/hooks/pre-upgrade/upgrade_job.yaml + set: + serviceAccountName: upgradeJob-global-test + upgradeJob.rbacCreate: true + asserts: + - equal: + path: spec.template.spec.serviceAccountName + value: test-release-enterprise-upgrade-sa + + - it: should render global 
serviceAccountName when upgradeJob.rbacCreate is false + template: templates/hooks/pre-upgrade/upgrade_job.yaml + set: + serviceAccountName: upgradeJob-global-test + upgradeJob.rbacCreate: false + asserts: + - equal: + path: spec.template.spec.serviceAccountName + value: upgradeJob-global-test + + - it: should render component serviceAccountName even when global serviceAccountName is set + template: templates/hooks/pre-upgrade/upgrade_job.yaml + set: + serviceAccountName: global-test + upgradeJob.serviceAccountName: upgradeJob-test + asserts: + - equal: + path: spec.template.spec.serviceAccountName + value: upgradeJob-test + + - it: should render component nodeSelector + template: templates/hooks/pre-upgrade/upgrade_job.yaml + set: + upgradeJob.nodeSelector: + upgradeJob: test + asserts: + - isSubset: + path: spec.template.spec.nodeSelector + content: + upgradeJob: test + + - it: should render component affinity + template: templates/hooks/pre-upgrade/upgrade_job.yaml + set: + upgradeJob.affinity: + upgradeJob: test + asserts: + - isSubset: + path: spec.template.spec.affinity + content: + upgradeJob: test + + - it: should render component tolerations + template: templates/hooks/pre-upgrade/upgrade_job.yaml + set: + upgradeJob.tolerations: + - key: "upgradeJob" + operator: "Equal" + value: "test" + effect: "NoSchedule" + asserts: + - contains: + path: spec.template.spec.tolerations + content: + key: "upgradeJob" + operator: "Equal" + value: "test" + effect: "NoSchedule" + count: 1 + + - it: should render proper initContainers + template: templates/hooks/pre-upgrade/upgrade_job.yaml + asserts: + - matchSnapshot: + path: spec.template.spec.initContainers + + - it: should render proper database endpoint on entrypoint + template: templates/hooks/pre-upgrade/upgrade_job.yaml + asserts: + - matchRegex: + path: spec.template.spec.containers[0].args[0] + pattern: anchore-enterprise-manager db --db-connect 
postgresql:\/\/\"\$\{ANCHORE_DB_USER\}\":\"\$\{ANCHORE_DB_PASSWORD\}\"@\"\$\{ANCHORE_DB_HOST\}\":\"\$\{ANCHORE_DB_PORT\}\"\/\"\$\{ANCHORE_DB_NAME\}\" upgrade --dontask; + count: 1 + + - it: should render proper database endpoint on entrypoint when ssl is enabled and mode is require + template: templates/hooks/pre-upgrade/upgrade_job.yaml + set: + anchoreConfig.database.ssl: true + anchoreConfig.database.sslMode: require + asserts: + - matchRegex: + path: spec.template.spec.containers[0].args[0] + pattern: anchore-enterprise-manager db --db-use-ssl --db-connect postgresql:\/\/\"\$\{ANCHORE_DB_USER\}\":\"\$\{ANCHORE_DB_PASSWORD\}\"@\"\$\{ANCHORE_DB_HOST\}\":\"\$\{ANCHORE_DB_PORT\}\"\/\"\$\{ANCHORE_DB_NAME\}\"\?sslmode\=require upgrade --dontask; + count: 1 + + - it: should render proper database endpoint on entrypoint when ssl is enabled and mode is verify-full + template: templates/hooks/pre-upgrade/upgrade_job.yaml + set: + anchoreConfig.database.ssl: true + anchoreConfig.database.sslMode: verify-full + anchoreConfig.database.sslRootCertFileName: test-root.crt + asserts: + - matchRegex: + path: spec.template.spec.containers[0].args[0] + pattern: anchore-enterprise-manager db --db-use-ssl --db-connect postgresql:\/\/\"\$\{ANCHORE_DB_USER\}\":\"\$\{ANCHORE_DB_PASSWORD\}\"@\"\$\{ANCHORE_DB_HOST\}\":\"\$\{ANCHORE_DB_PORT\}\"\/\"\$\{ANCHORE_DB_NAME\}\"\?sslmode\=verify-full\\&sslrootcert\=\/home\/anchore\/certs\/test-root\.crt upgrade --dontask; + count: 1 + + - it: should render upgradeJob component environment variables + template: templates/hooks/pre-upgrade/upgrade_job.yaml + set: + upgradeJob.extraEnv: + - name: upgradeJob + value: test + - name: test + value: foobar + asserts: + - contains: + path: spec.template.spec.containers[0].env + content: + name: upgradeJob + value: test + count: 1 + - contains: + path: spec.template.spec.containers[0].env + content: + name: test + value: foobar + + - it: should render component resource requests & limits + template: 
templates/hooks/pre-upgrade/upgrade_job.yaml + documentIndex: 0 + set: + upgradeJob.resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 200m + memory: 256Mi + asserts: + - isSubset: + path: spec.template.spec.containers[0].resources.requests + content: + cpu: 100m + memory: 128Mi + count: 1 + - isSubset: + path: spec.template.spec.containers[0].resources.limits + content: + cpu: 200m + memory: 256Mi + count: 1 + + - it: should render component extraVolumes + template: templates/hooks/pre-upgrade/upgrade_job.yaml + documentIndex: 0 + set: + upgradeJob.extraVolumes: + - name: extra-vol + emptyDir: {} + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: extra-vol + emptyDir: {} + count: 1 + + - it: should render component extraVolumeMounts + template: templates/hooks/pre-upgrade/upgrade_job.yaml + documentIndex: 0 + set: + upgradeJob.extraVolumeMounts: + - name: extra-vol + mountPath: /mnt/extra-vol + readOnly: false + asserts: + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: extra-vol + mountPath: /mnt/extra-vol + readOnly: false + count: 1 + + - it: should render component and global extraVolumes + template: templates/hooks/pre-upgrade/upgrade_job.yaml + documentIndex: 0 + set: + upgradeJob.extraVolumes: + - name: extra-vol + emptyDir: {} + extraVolumes: + - name: global-extra-vol + emptyDir: {} + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: extra-vol + emptyDir: {} + count: 1 + - contains: + path: spec.template.spec.volumes + content: + name: global-extra-vol + emptyDir: {} + count: 1 + + - it: should render component and global extraVolumeMounts + template: templates/hooks/pre-upgrade/upgrade_job.yaml + documentIndex: 0 + set: + upgradeJob.extraVolumeMounts: + - name: extra-vol + mountPath: /mnt/extra-vol + readOnly: false + extraVolumeMounts: + - name: global-extra-vol + mountPath: /mnt/global-extra-vol + readOnly: false + asserts: + - contains: + path: 
spec.template.spec.containers[0].volumeMounts + content: + name: extra-vol + mountPath: /mnt/extra-vol + readOnly: false + count: 1 + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: global-extra-vol + mountPath: /mnt/global-extra-vol + readOnly: false + count: 1 diff --git a/stable/enterprise/tests/reports_resources_test.yaml b/stable/enterprise/tests/reports_resources_test.yaml new file mode 100644 index 00000000..e869282e --- /dev/null +++ b/stable/enterprise/tests/reports_resources_test.yaml @@ -0,0 +1,492 @@ +suite: Reports Resources Tests +templates: + - reports_deployment.yaml + - anchore_secret.yaml + - anchore_configmap.yaml +release: + name: test-release + namespace: test-namespace +chart: + version: 9.9.9 + appVersion: 9.9.9 + +tests: + - it: should set the correct resource names + template: reports_deployment.yaml + asserts: + - equal: + path: metadata.name + value: test-release-enterprise-reports + + - it: should render component labels + template: reports_deployment.yaml + set: + reports.labels: + reports: test + test: foobar + asserts: + - isSubset: + path: metadata.labels + content: + reports: test + test: foobar + - isSubset: + path: spec.template.metadata.labels + content: + reports: test + test: foobar + template: reports_deployment.yaml + documentIndex: 0 + + - it: should render component annotations + template: reports_deployment.yaml + set: + reports.annotations: + reports: test + test: foobar + asserts: + - isSubset: + path: metadata.annotations + content: + reports: test + test: foobar + - isSubset: + path: spec.template.metadata.annotations + content: + reports: test + test: foobar + template: reports_deployment.yaml + documentIndex: 0 + + - it: should render component matchLabels + template: reports_deployment.yaml + documentIndex: 0 + asserts: + - isSubset: + path: spec.selector.matchLabels + content: + app.kubernetes.io/name: test-release-enterprise + app.kubernetes.io/component: reports + + - it: 
should render component replica count + template: reports_deployment.yaml + documentIndex: 0 + set: + reports.replicaCount: 2 + asserts: + - equal: + path: spec.replicas + value: 2 + + - it: should render component serviceAccountName + template: reports_deployment.yaml + documentIndex: 0 + set: + reports.serviceAccountName: reports-test + asserts: + - equal: + path: spec.template.spec.serviceAccountName + value: reports-test + + - it: should render component serviceAccountName even when global serviceAccountName is set + template: reports_deployment.yaml + documentIndex: 0 + set: + serviceAccountName: global-test + reports.serviceAccountName: reports-test + asserts: + - equal: + path: spec.template.spec.serviceAccountName + value: reports-test + + - it: should render component nodeSelector + template: reports_deployment.yaml + documentIndex: 0 + set: + reports.nodeSelector: + reports: test + asserts: + - isSubset: + path: spec.template.spec.nodeSelector + content: + reports: test + + - it: should render component affinity + template: reports_deployment.yaml + documentIndex: 0 + set: + reports.affinity: + reports: test + asserts: + - isSubset: + path: spec.template.spec.affinity + content: + reports: test + + - it: should render component tolerations + template: reports_deployment.yaml + documentIndex: 0 + set: + reports.tolerations: + - key: "reports" + operator: "Equal" + value: "test" + effect: "NoSchedule" + asserts: + - contains: + path: spec.template.spec.tolerations + content: + key: "reports" + operator: "Equal" + value: "test" + effect: "NoSchedule" + count: 1 + + - it: should render component container name + template: reports_deployment.yaml + documentIndex: 0 + asserts: + - equal: + path: spec.template.spec.containers[0].name + value: enterprise-reports + + - it: should render component entrypoint args + template: reports_deployment.yaml + documentIndex: 0 + asserts: + - matchRegex: + path: spec.template.spec.containers[0].args[0] + pattern: 
^/docker-entrypoint\.sh anchore-enterprise-manager service start --no-auto-upgrade reports$ + count: 1 + + - it: should render reports component environment variables + template: reports_deployment.yaml + documentIndex: 0 + set: + reports.extraEnv: + - name: reports + value: test + - name: test + value: foobar + asserts: + - contains: + path: spec.template.spec.containers[0].env + content: + name: ANCHORE_ENDPOINT_HOSTNAME + value: test-release-enterprise-reports + count: 1 + - contains: + path: spec.template.spec.containers[0].env + content: + name: ANCHORE_PORT + value: "8558" + count: 1 + - contains: + path: spec.template.spec.containers[0].env + content: + name: reports + value: test + count: 1 + - contains: + path: spec.template.spec.containers[0].env + content: + name: test + value: foobar + + - it: should render component ports + template: reports_deployment.yaml + documentIndex: 0 + asserts: + - contains: + path: spec.template.spec.containers[0].ports + content: + name: reports + containerPort: 8558 + count: 1 + + - it: should render component probes + template: reports_deployment.yaml + documentIndex: 0 + asserts: + - isSubset: + path: spec.template.spec.containers[0].livenessProbe + content: + httpGet: + path: /health + port: reports + scheme: HTTP + initialDelaySeconds: 120 + timeoutSeconds: 10 + periodSeconds: 10 + failureThreshold: 6 + successThreshold: 1 + count: 1 + - isSubset: + path: spec.template.spec.containers[0].readinessProbe + content: + httpGet: + path: /health + port: reports + scheme: HTTP + timeoutSeconds: 10 + periodSeconds: 10 + failureThreshold: 3 + successThreshold: 1 + count: 1 + + - it: should render component resource requests & limits + template: reports_deployment.yaml + documentIndex: 0 + set: + reports.resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 200m + memory: 256Mi + asserts: + - isSubset: + path: spec.template.spec.containers[0].resources.requests + content: + cpu: 100m + memory: 128Mi + count: 1 + - 
isSubset: + path: spec.template.spec.containers[0].resources.limits + content: + cpu: 200m + memory: 256Mi + count: 1 + + - it: should render component service type + template: reports_deployment.yaml + documentIndex: 1 + asserts: + - equal: + path: spec.type + value: ClusterIP + count: 1 + + - it: should render component service ports + template: reports_deployment.yaml + documentIndex: 1 + asserts: + - contains: + path: spec.ports + content: + name: reports + port: 8558 + targetPort: 8558 + protocol: TCP + count: 1 + + - it: should render component service selectors + template: reports_deployment.yaml + documentIndex: 1 + asserts: + - isSubset: + path: spec.selector + content: + app.kubernetes.io/name: test-release-enterprise + app.kubernetes.io/component: reports + count: 1 + + - it: should render emptyDir scratch volume when use_volume is enabled + template: reports_deployment.yaml + documentIndex: 0 + set: + anchoreConfig.reports.use_volume: true + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: anchore-scratch + emptyDir: {} + count: 1 + + - it: should render scratch volume details from global values when use_volume is enabled + template: reports_deployment.yaml + documentIndex: 0 + set: + anchoreConfig.reports.use_volume: true + scratchVolume.details: + emptyDir: + medium: Memory + memoryLimit: 128Mi + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: anchore-scratch + emptyDir: + medium: Memory + memoryLimit: 128Mi + count: 1 + + - it: should render scratch volume using details from reports values, even when global scratchVolume is explicitly set + template: reports_deployment.yaml + documentIndex: 0 + set: + anchoreConfig.reports.use_volume: true + reports.scratchVolume.details: + hostPath: + path: /tmp + type: Directory + scratchVolume.details: + emptyDir: + medium: Memory + memoryLimit: 128Mi + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: anchore-scratch + hostPath: + 
path: /tmp + type: Directory + count: 1 + + - it: should render scratch volume using details from global values, when reports values are not set + template: reports_deployment.yaml + documentIndex: 0 + set: + anchoreConfig.reports.use_volume: true + scratchVolume.details: + emptyDir: + medium: Memory + memoryLimit: 128Mi + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: anchore-scratch + emptyDir: + medium: Memory + memoryLimit: 128Mi + count: 1 + + - it: should not render scratch volume when use_volume is not enabled (default) + template: reports_deployment.yaml + documentIndex: 0 + asserts: + - notContains: + path: spec.template.spec.volumes + content: + name: anchore-scratch + emptyDir: {} + count: 1 + + - it: should render fixPermissionsInitContainer when use_volume is enabled and fixGroupPermissions is set + templates: + - reports_deployment.yaml + documentIndex: 0 + set: + scratchVolume.fixGroupPermissions: true + securityContext.fsGroup: 9999 + anchoreConfig.reports.use_volume: true + asserts: + - contains: + path: spec.template.spec.initContainers + content: + name: mode-fixer + volumeMounts: + - name: "anchore-scratch" + mountPath: /analysis_scratch + command: [ sh, -c, (chmod 0775 /analysis_scratch; chgrp 9999 /analysis_scratch ) ] + count: 1 + any: true + + - it: should not render fixPermissionsInitContainer if use_volume isn't enabled + set: + scratchVolume.fixGroupPermissions: true + securityContext.fsGroup: 9999 + templates: + - reports_deployment.yaml + asserts: + - notExists: + path: spec.template.spec.initContainers + + - it: should render component extraVolumes + template: reports_deployment.yaml + documentIndex: 0 + set: + reports.extraVolumes: + - name: extra-vol + emptyDir: {} + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: extra-vol + emptyDir: {} + count: 1 + + - it: should render component extraVolumeMounts + template: reports_deployment.yaml + documentIndex: 0 + set: + 
reports.extraVolumeMounts: + - name: extra-vol + mountPath: /mnt/extra-vol + readOnly: false + asserts: + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: extra-vol + mountPath: /mnt/extra-vol + readOnly: false + count: 1 + + - it: should render component and global extraVolumes + template: reports_deployment.yaml + documentIndex: 0 + set: + reports.extraVolumes: + - name: extra-vol + emptyDir: {} + extraVolumes: + - name: global-extra-vol + emptyDir: {} + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: extra-vol + emptyDir: {} + count: 1 + - contains: + path: spec.template.spec.volumes + content: + name: global-extra-vol + emptyDir: {} + count: 1 + + - it: should render component and global extraVolumeMounts + template: reports_deployment.yaml + documentIndex: 0 + set: + reports.extraVolumeMounts: + - name: extra-vol + mountPath: /mnt/extra-vol + readOnly: false + extraVolumeMounts: + - name: global-extra-vol + mountPath: /mnt/global-extra-vol + readOnly: false + asserts: + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: extra-vol + mountPath: /mnt/extra-vol + readOnly: false + count: 1 + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: global-extra-vol + mountPath: /mnt/global-extra-vol + readOnly: false + count: 1 diff --git a/stable/enterprise/tests/reportsworker_resources_test.yaml b/stable/enterprise/tests/reportsworker_resources_test.yaml new file mode 100644 index 00000000..90a21234 --- /dev/null +++ b/stable/enterprise/tests/reportsworker_resources_test.yaml @@ -0,0 +1,377 @@ +suite: Reports Worker Resources Tests +templates: + - reportsworker_deployment.yaml + - anchore_secret.yaml + - anchore_configmap.yaml +release: + name: test-release + namespace: test-namespace +chart: + version: 9.9.9 + appVersion: 9.9.9 + +tests: + - it: should set the correct resource names + template: reportsworker_deployment.yaml + asserts: + - 
equal: + path: metadata.name + value: test-release-enterprise-reportsworker + + - it: should render component labels + template: reportsworker_deployment.yaml + set: + reportsWorker.labels: + reports: test + test: foobar + asserts: + - isSubset: + path: metadata.labels + content: + reports: test + test: foobar + - isSubset: + path: spec.template.metadata.labels + content: + reports: test + test: foobar + template: reportsworker_deployment.yaml + documentIndex: 0 + + - it: should render component annotations + template: reportsworker_deployment.yaml + set: + reportsWorker.annotations: + reports: test + test: foobar + asserts: + - isSubset: + path: metadata.annotations + content: + reports: test + test: foobar + - isSubset: + path: spec.template.metadata.annotations + content: + reports: test + test: foobar + template: reportsworker_deployment.yaml + documentIndex: 0 + + - it: should render component matchLabels + template: reportsworker_deployment.yaml + documentIndex: 0 + asserts: + - isSubset: + path: spec.selector.matchLabels + content: + app.kubernetes.io/name: test-release-enterprise + app.kubernetes.io/component: reportsworker + + - it: should render component replica count + template: reportsworker_deployment.yaml + documentIndex: 0 + set: + reportsWorker.replicaCount: 2 + asserts: + - equal: + path: spec.replicas + value: 2 + + - it: should render component serviceAccountName + template: reportsworker_deployment.yaml + documentIndex: 0 + set: + reportsWorker.serviceAccountName: reports-test + asserts: + - equal: + path: spec.template.spec.serviceAccountName + value: reports-test + + - it: should render component serviceAccountName even when global serviceAccountName is set + template: reportsworker_deployment.yaml + documentIndex: 0 + set: + serviceAccountName: global-test + reportsWorker.serviceAccountName: reports-test + asserts: + - equal: + path: spec.template.spec.serviceAccountName + value: reports-test + + - it: should render component nodeSelector + 
template: reportsworker_deployment.yaml + documentIndex: 0 + set: + reportsWorker.nodeSelector: + reports: test + asserts: + - isSubset: + path: spec.template.spec.nodeSelector + content: + reports: test + + - it: should render component affinity + template: reportsworker_deployment.yaml + documentIndex: 0 + set: + reportsWorker.affinity: + reports: test + asserts: + - isSubset: + path: spec.template.spec.affinity + content: + reports: test + + - it: should render component tolerations + template: reportsworker_deployment.yaml + documentIndex: 0 + set: + reportsWorker.tolerations: + - key: "reports" + operator: "Equal" + value: "test" + effect: "NoSchedule" + asserts: + - contains: + path: spec.template.spec.tolerations + content: + key: "reports" + operator: "Equal" + value: "test" + effect: "NoSchedule" + count: 1 + + - it: should render component container name + template: reportsworker_deployment.yaml + documentIndex: 0 + asserts: + - equal: + path: spec.template.spec.containers[0].name + value: enterprise-reportsworker + + - it: should render component entrypoint args + template: reportsworker_deployment.yaml + documentIndex: 0 + asserts: + - matchRegex: + path: spec.template.spec.containers[0].args[0] + pattern: ^/docker-entrypoint\.sh anchore-enterprise-manager service start --no-auto-upgrade reports_worker$ + count: 1 + + - it: should render reports component environment variables + template: reportsworker_deployment.yaml + documentIndex: 0 + set: + reportsWorker.extraEnv: + - name: reports + value: test + - name: test + value: foobar + asserts: + - contains: + path: spec.template.spec.containers[0].env + content: + name: ANCHORE_ENDPOINT_HOSTNAME + value: test-release-enterprise-reportsworker + count: 1 + - contains: + path: spec.template.spec.containers[0].env + content: + name: ANCHORE_PORT + value: "8559" + count: 1 + - contains: + path: spec.template.spec.containers[0].env + content: + name: reports + value: test + count: 1 + - contains: + path: 
spec.template.spec.containers[0].env + content: + name: test + value: foobar + + - it: should render component ports + template: reportsworker_deployment.yaml + documentIndex: 0 + asserts: + - contains: + path: spec.template.spec.containers[0].ports + content: + name: reportsworker + containerPort: 8559 + count: 1 + + - it: should render component probes + template: reportsworker_deployment.yaml + documentIndex: 0 + asserts: + - isSubset: + path: spec.template.spec.containers[0].livenessProbe + content: + httpGet: + path: /health + port: reportsworker + scheme: HTTP + initialDelaySeconds: 120 + timeoutSeconds: 10 + periodSeconds: 10 + failureThreshold: 6 + successThreshold: 1 + count: 1 + - isSubset: + path: spec.template.spec.containers[0].readinessProbe + content: + httpGet: + path: /health + port: reportsworker + scheme: HTTP + timeoutSeconds: 10 + periodSeconds: 10 + failureThreshold: 3 + successThreshold: 1 + count: 1 + + - it: should render component resource requests & limits + template: reportsworker_deployment.yaml + documentIndex: 0 + set: + reportsWorker.resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 200m + memory: 256Mi + asserts: + - isSubset: + path: spec.template.spec.containers[0].resources.requests + content: + cpu: 100m + memory: 128Mi + count: 1 + - isSubset: + path: spec.template.spec.containers[0].resources.limits + content: + cpu: 200m + memory: 256Mi + count: 1 + + - it: should render component service type + template: reportsworker_deployment.yaml + documentIndex: 1 + asserts: + - equal: + path: spec.type + value: ClusterIP + count: 1 + + - it: should render component service ports + template: reportsworker_deployment.yaml + documentIndex: 1 + asserts: + - contains: + path: spec.ports + content: + name: reportsworker + port: 8559 + targetPort: 8559 + protocol: TCP + count: 1 + + - it: should render component service selectors + template: reportsworker_deployment.yaml + documentIndex: 1 + asserts: + - isSubset: + path: 
spec.selector + content: + app.kubernetes.io/name: test-release-enterprise + app.kubernetes.io/component: reportsworker + count: 1 + + - it: should render component extraVolumes + template: reportsworker_deployment.yaml + documentIndex: 0 + set: + reportsWorker.extraVolumes: + - name: extra-vol + emptyDir: {} + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: extra-vol + emptyDir: {} + count: 1 + + - it: should render component extraVolumeMounts + template: reportsworker_deployment.yaml + documentIndex: 0 + set: + reportsWorker.extraVolumeMounts: + - name: extra-vol + mountPath: /mnt/extra-vol + readOnly: false + asserts: + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: extra-vol + mountPath: /mnt/extra-vol + readOnly: false + count: 1 + + - it: should render component and global extraVolumes + template: reportsworker_deployment.yaml + documentIndex: 0 + set: + reportsWorker.extraVolumes: + - name: extra-vol + emptyDir: {} + extraVolumes: + - name: global-extra-vol + emptyDir: {} + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: extra-vol + emptyDir: {} + count: 1 + - contains: + path: spec.template.spec.volumes + content: + name: global-extra-vol + emptyDir: {} + count: 1 + + - it: should render component and global extraVolumeMounts + template: reportsworker_deployment.yaml + documentIndex: 0 + set: + reportsWorker.extraVolumeMounts: + - name: extra-vol + mountPath: /mnt/extra-vol + readOnly: false + extraVolumeMounts: + - name: global-extra-vol + mountPath: /mnt/global-extra-vol + readOnly: false + asserts: + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: extra-vol + mountPath: /mnt/extra-vol + readOnly: false + count: 1 + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: global-extra-vol + mountPath: /mnt/global-extra-vol + readOnly: false + count: 1 diff --git 
a/stable/enterprise/tests/simplequeue_resources_test.yaml b/stable/enterprise/tests/simplequeue_resources_test.yaml new file mode 100644 index 00000000..e16fa53c --- /dev/null +++ b/stable/enterprise/tests/simplequeue_resources_test.yaml @@ -0,0 +1,352 @@ +suite: SimpleQueue Resources Tests +templates: + - simplequeue_deployment.yaml + - anchore_secret.yaml + - anchore_configmap.yaml +release: + name: test-release + namespace: test-namespace +chart: + version: 9.9.9 + appVersion: 9.9.9 + +tests: + - it: should set the correct resource names + template: simplequeue_deployment.yaml + asserts: + - equal: + path: metadata.name + value: test-release-enterprise-simplequeue + + - it: should render component labels + template: simplequeue_deployment.yaml + set: + simpleQueue.labels: + simplequeue: test + test: foobar + asserts: + - isSubset: + path: metadata.labels + content: + simplequeue: test + test: foobar + - isSubset: + path: spec.template.metadata.labels + content: + simplequeue: test + test: foobar + template: simplequeue_deployment.yaml + documentIndex: 0 + + - it: should render component annotations + template: simplequeue_deployment.yaml + set: + simpleQueue.annotations: + simplequeue: test + test: foobar + asserts: + - isSubset: + path: metadata.annotations + content: + simplequeue: test + test: foobar + - isSubset: + path: spec.template.metadata.annotations + content: + simplequeue: test + test: foobar + template: simplequeue_deployment.yaml + documentIndex: 0 + + - it: should render component matchLabels + template: simplequeue_deployment.yaml + documentIndex: 0 + asserts: + - isSubset: + path: spec.selector.matchLabels + content: + app.kubernetes.io/name: test-release-enterprise + app.kubernetes.io/component: simplequeue + + - it: should render component replica count + template: simplequeue_deployment.yaml + documentIndex: 0 + set: + simpleQueue.replicaCount: 2 + asserts: + - equal: + path: spec.replicas + value: 2 + + - it: should render component 
serviceAccountName + template: simplequeue_deployment.yaml + documentIndex: 0 + set: + simpleQueue.serviceAccountName: simplequeue-test + asserts: + - equal: + path: spec.template.spec.serviceAccountName + value: simplequeue-test + + - it: should render component serviceAccountName even when global serviceAccountName is set + template: simplequeue_deployment.yaml + documentIndex: 0 + set: + serviceAccountName: global-test + simpleQueue.serviceAccountName: simplequeue-test + asserts: + - equal: + path: spec.template.spec.serviceAccountName + value: simplequeue-test + + - it: should render component nodeSelector + template: simplequeue_deployment.yaml + documentIndex: 0 + set: + simpleQueue.nodeSelector: + simplequeue: test + asserts: + - isSubset: + path: spec.template.spec.nodeSelector + content: + simplequeue: test + + - it: should render component affinity + template: simplequeue_deployment.yaml + documentIndex: 0 + set: + simpleQueue.affinity: + simplequeue: test + asserts: + - isSubset: + path: spec.template.spec.affinity + content: + simplequeue: test + + - it: should render component tolerations + template: simplequeue_deployment.yaml + documentIndex: 0 + set: + simpleQueue.tolerations: + - key: "simplequeue" + operator: "Equal" + value: "test" + effect: "NoSchedule" + asserts: + - contains: + path: spec.template.spec.tolerations + content: + key: "simplequeue" + operator: "Equal" + value: "test" + effect: "NoSchedule" + count: 1 + + - it: should render component container name + template: simplequeue_deployment.yaml + documentIndex: 0 + asserts: + - equal: + path: spec.template.spec.containers[0].name + value: enterprise-simplequeue + + - it: should render component entrypoint args + template: simplequeue_deployment.yaml + documentIndex: 0 + asserts: + - matchRegex: + path: spec.template.spec.containers[0].args[0] + pattern: ^/docker-entrypoint\.sh anchore-enterprise-manager service start --no-auto-upgrade simplequeue$ + count: 1 + + - it: should render 
simplequeue component environment variables + template: simplequeue_deployment.yaml + documentIndex: 0 + set: + simpleQueue.extraEnv: + - name: simplequeue + value: test + - name: test + value: foobar + asserts: + - contains: + path: spec.template.spec.containers[0].env + content: + name: ANCHORE_ENDPOINT_HOSTNAME + value: test-release-enterprise-simplequeue + count: 1 + - contains: + path: spec.template.spec.containers[0].env + content: + name: ANCHORE_PORT + value: "8083" + count: 1 + - contains: + path: spec.template.spec.containers[0].env + content: + name: simplequeue + value: test + count: 1 + - contains: + path: spec.template.spec.containers[0].env + content: + name: test + value: foobar + + - it: should render component ports + template: simplequeue_deployment.yaml + documentIndex: 0 + asserts: + - contains: + path: spec.template.spec.containers[0].ports + content: + name: simplequeue + containerPort: 8083 + count: 1 + + - it: should render component probes + template: simplequeue_deployment.yaml + documentIndex: 0 + asserts: + - isSubset: + path: spec.template.spec.containers[0].livenessProbe + content: + httpGet: + path: /health + port: simplequeue + scheme: HTTP + initialDelaySeconds: 120 + timeoutSeconds: 10 + periodSeconds: 10 + failureThreshold: 6 + successThreshold: 1 + count: 1 + - isSubset: + path: spec.template.spec.containers[0].readinessProbe + content: + httpGet: + path: /health + port: simplequeue + scheme: HTTP + timeoutSeconds: 10 + periodSeconds: 10 + failureThreshold: 3 + successThreshold: 1 + count: 1 + + - it: should render component service type + template: simplequeue_deployment.yaml + documentIndex: 1 + asserts: + - equal: + path: spec.type + value: ClusterIP + count: 1 + + - it: should render component service ports + template: simplequeue_deployment.yaml + documentIndex: 1 + asserts: + - contains: + path: spec.ports + content: + name: simplequeue + port: 8083 + targetPort: 8083 + protocol: TCP + count: 1 + + - it: should render 
component service selectors + template: simplequeue_deployment.yaml + documentIndex: 1 + asserts: + - isSubset: + path: spec.selector + content: + app.kubernetes.io/name: test-release-enterprise + app.kubernetes.io/component: simplequeue + count: 1 + + - it: should render component extraVolumes + template: simplequeue_deployment.yaml + documentIndex: 0 + set: + simpleQueue.extraVolumes: + - name: extra-vol + emptyDir: {} + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: extra-vol + emptyDir: {} + count: 1 + + - it: should render component extraVolumeMounts + template: simplequeue_deployment.yaml + documentIndex: 0 + set: + simpleQueue.extraVolumeMounts: + - name: extra-vol + mountPath: /mnt/extra-vol + readOnly: false + asserts: + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: extra-vol + mountPath: /mnt/extra-vol + readOnly: false + count: 1 + + - it: should render component and global extraVolumes + template: simplequeue_deployment.yaml + documentIndex: 0 + set: + simpleQueue.extraVolumes: + - name: extra-vol + emptyDir: {} + extraVolumes: + - name: global-extra-vol + emptyDir: {} + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: extra-vol + emptyDir: {} + count: 1 + - contains: + path: spec.template.spec.volumes + content: + name: global-extra-vol + emptyDir: {} + count: 1 + + - it: should render component and global extraVolumeMounts + template: simplequeue_deployment.yaml + documentIndex: 0 + set: + simpleQueue.extraVolumeMounts: + - name: extra-vol + mountPath: /mnt/extra-vol + readOnly: false + extraVolumeMounts: + - name: global-extra-vol + mountPath: /mnt/global-extra-vol + readOnly: false + asserts: + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: extra-vol + mountPath: /mnt/extra-vol + readOnly: false + count: 1 + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: global-extra-vol + 
mountPath: /mnt/global-extra-vol + readOnly: false + count: 1 diff --git a/stable/enterprise/tests/ui_resources_test.yaml b/stable/enterprise/tests/ui_resources_test.yaml new file mode 100644 index 00000000..9755d237 --- /dev/null +++ b/stable/enterprise/tests/ui_resources_test.yaml @@ -0,0 +1,388 @@ +suite: UI Resources Tests +templates: + - ui_deployment.yaml + - ui_secret.yaml + - ui_configmap.yaml +release: + name: test-release + namespace: test-namespace +chart: + version: 9.9.9 + appVersion: 9.9.9 + +tests: + - it: should set the correct resource names + template: ui_deployment.yaml + asserts: + - equal: + path: metadata.name + value: test-release-enterprise-ui + + - it: should render component labels + template: ui_deployment.yaml + set: + ui.labels: + ui: test + test: foobar + asserts: + - isSubset: + path: metadata.labels + content: + ui: test + test: foobar + - isSubset: + path: spec.template.metadata.labels + content: + ui: test + test: foobar + template: ui_deployment.yaml + documentIndex: 0 + + - it: should render component annotations + template: ui_deployment.yaml + set: + ui.annotations: + ui: test + test: foobar + asserts: + - isSubset: + path: metadata.annotations + content: + ui: test + test: foobar + - isSubset: + path: spec.template.metadata.annotations + content: + ui: test + test: foobar + template: ui_deployment.yaml + documentIndex: 0 + + - it: should render component matchLabels + template: ui_deployment.yaml + documentIndex: 0 + asserts: + - isSubset: + path: spec.selector.matchLabels + content: + app.kubernetes.io/name: test-release-enterprise + app.kubernetes.io/component: ui + + - it: should render component serviceAccountName + template: ui_deployment.yaml + documentIndex: 0 + set: + ui.serviceAccountName: ui-test + asserts: + - equal: + path: spec.template.spec.serviceAccountName + value: ui-test + + - it: should render component serviceAccountName even when global serviceAccountName is set + template: ui_deployment.yaml + 
documentIndex: 0 + set: + serviceAccountName: global-test + ui.serviceAccountName: ui-test + asserts: + - equal: + path: spec.template.spec.serviceAccountName + value: ui-test + + - it: should render component nodeSelector + template: ui_deployment.yaml + documentIndex: 0 + set: + ui.nodeSelector: + ui: test + asserts: + - isSubset: + path: spec.template.spec.nodeSelector + content: + ui: test + + - it: should render component affinity + template: ui_deployment.yaml + documentIndex: 0 + set: + ui.affinity: + ui: test + asserts: + - isSubset: + path: spec.template.spec.affinity + content: + ui: test + + - it: should render component tolerations + template: ui_deployment.yaml + documentIndex: 0 + set: + ui.tolerations: + - key: "ui" + operator: "Equal" + value: "test" + effect: "NoSchedule" + asserts: + - contains: + path: spec.template.spec.tolerations + content: + key: "ui" + operator: "Equal" + value: "test" + effect: "NoSchedule" + count: 1 + + - it: should render component container name + template: ui_deployment.yaml + documentIndex: 0 + asserts: + - equal: + path: spec.template.spec.containers[0].name + value: enterprise-ui + + - it: should render component entrypoint args + template: ui_deployment.yaml + documentIndex: 0 + asserts: + - matchRegex: + path: spec.template.spec.containers[0].args[0] + pattern: ^/docker-entrypoint\.sh node \/home\/node\/aui\/build\/server.js$ + count: 1 + + - it: should render ui component environment variables + template: ui_deployment.yaml + documentIndex: 0 + set: + ui.extraEnv: + - name: ui + value: test + - name: test + value: foobar + asserts: + - contains: + path: spec.template.spec.containers[0].env + content: + name: ANCHORE_ENDPOINT_HOSTNAME + value: test-release-enterprise-ui + count: 1 + - contains: + path: spec.template.spec.containers[0].env + content: + name: ANCHORE_PORT + value: "80" + count: 1 + - contains: + path: spec.template.spec.containers[0].env + content: + name: ui + value: test + count: 1 + - contains: + 
path: spec.template.spec.containers[0].env + content: + name: test + value: foobar + + - it: should render environment variables for SSL certs when ssl is enabled + template: ui_deployment.yaml + documentIndex: 0 + set: + anchoreConfig.database: + ssl: true + sslRootCertFileName: test.crt + ui.ldapsRootCaCertName: ldap.crt + asserts: + - contains: + path: spec.template.spec.containers[0].env + content: + name: PGSSLROOTCERT + value: /home/anchore/certs/test.crt + count: 1 + - contains: + path: spec.template.spec.containers[0].env + content: + name: NODE_EXTRA_CA_CERTS + value: /home/anchore/certs/ldap.crt + count: 1 + + - it: should render component ports + template: ui_deployment.yaml + documentIndex: 0 + asserts: + - contains: + path: spec.template.spec.containers[0].ports + content: + name: ui + containerPort: 3000 + protocol: TCP + count: 1 + + - it: should render component probes + template: ui_deployment.yaml + documentIndex: 0 + asserts: + - isSubset: + path: spec.template.spec.containers[0].livenessProbe + content: + tcpSocket: + port: ui + initialDelaySeconds: 120 + timeoutSeconds: 10 + periodSeconds: 10 + failureThreshold: 6 + successThreshold: 1 + count: 1 + - isSubset: + path: spec.template.spec.containers[0].readinessProbe + content: + httpGet: + path: /service/health + port: ui + scheme: HTTP + timeoutSeconds: 10 + periodSeconds: 10 + failureThreshold: 3 + successThreshold: 1 + count: 1 + + - it: should render component resource requests & limits + template: ui_deployment.yaml + documentIndex: 0 + set: + ui.resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 200m + memory: 256Mi + asserts: + - isSubset: + path: spec.template.spec.containers[0].resources.requests + content: + cpu: 100m + memory: 128Mi + count: 1 + - isSubset: + path: spec.template.spec.containers[0].resources.limits + content: + cpu: 200m + memory: 256Mi + count: 1 + + - it: should render component service type + template: ui_deployment.yaml + documentIndex: 1 + 
asserts: + - equal: + path: spec.type + value: ClusterIP + count: 1 + + - it: should render component service ports + template: ui_deployment.yaml + documentIndex: 1 + asserts: + - contains: + path: spec.ports + content: + name: ui + port: 80 + targetPort: 3000 + protocol: TCP + count: 1 + + - it: should render component service selectors + template: ui_deployment.yaml + documentIndex: 1 + asserts: + - isSubset: + path: spec.selector + content: + app.kubernetes.io/name: test-release-enterprise + app.kubernetes.io/component: ui + count: 1 + + - it: should render component extraVolumes + template: ui_deployment.yaml + documentIndex: 0 + set: + ui.extraVolumes: + - name: extra-vol + emptyDir: {} + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: extra-vol + emptyDir: {} + count: 1 + + - it: should render component extraVolumeMounts + template: ui_deployment.yaml + documentIndex: 0 + set: + ui.extraVolumeMounts: + - name: extra-vol + mountPath: /mnt/extra-vol + readOnly: false + asserts: + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: extra-vol + mountPath: /mnt/extra-vol + readOnly: false + count: 1 + + - it: should render component and global extraVolumes + template: ui_deployment.yaml + documentIndex: 0 + set: + ui.extraVolumes: + - name: extra-vol + emptyDir: {} + extraVolumes: + - name: global-extra-vol + emptyDir: {} + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: extra-vol + emptyDir: {} + count: 1 + - contains: + path: spec.template.spec.volumes + content: + name: global-extra-vol + emptyDir: {} + count: 1 + + - it: should render component and global extraVolumeMounts + template: ui_deployment.yaml + documentIndex: 0 + set: + ui.extraVolumeMounts: + - name: extra-vol + mountPath: /mnt/extra-vol + readOnly: false + extraVolumeMounts: + - name: global-extra-vol + mountPath: /mnt/global-extra-vol + readOnly: false + asserts: + - contains: + path: 
spec.template.spec.containers[0].volumeMounts + content: + name: extra-vol + mountPath: /mnt/extra-vol + readOnly: false + count: 1 + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: global-extra-vol + mountPath: /mnt/global-extra-vol + readOnly: false + count: 1 diff --git a/stable/enterprise/tests/values.yaml b/stable/enterprise/tests/values.yaml new file mode 100644 index 00000000..9b015f99 --- /dev/null +++ b/stable/enterprise/tests/values.yaml @@ -0,0 +1,40 @@ +annotations: + foo: "bar" + bar: "baz" + +labels: + foo: "bar" + bar: "baz" + +extraEnv: + - name: foo + value: bar + - name: bar + value: baz + +anchoreConfig: + policyBundles: + custom_policy_bundle1.json: | + { + "id": "custom1", + "version": "1_0", + "name": "My custom bundle", + "comment": "My system's custom bundle", + "whitelisted_images": [], + "blacklisted_images": [], + "mappings": [], + "whitelists": [], + "policies": [] + } + custom_policy_bundle2.json: | + { + "id": "custom2", + "version": "1_0", + "name": "My Other Bundle", + "comment": "My system's other custom bundle", + "whitelisted_images": [], + "blacklisted_images": ["docker.io/centos:latest"], + "mappings": [], + "whitelists": [], + "policies": [] + } diff --git a/stable/enterprise/values.yaml b/stable/enterprise/values.yaml new file mode 100644 index 00000000..0997dd57 --- /dev/null +++ b/stable/enterprise/values.yaml @@ -0,0 +1,1597 @@ +################################################### +## @section Global Resource Parameters +## Global params used by all child charts +################################################### +global: + ## @param global.fullnameOverride overrides the fullname set on resources + ## + fullnameOverride: "" + + ## @param global.nameOverride overrides the name set on resources + ## + nameOverride: "" + +################################################### +## @section Common Resource Parameters +## Common params used by all Anchore k8s resources 
+################################################### + +## @param image Image used for all Anchore Enterprise deployments, excluding Anchore UI +## +image: docker.io/anchore/enterprise:v5.4.0 + +## @param imagePullPolicy Image pull policy used by all deployments +## ref: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy +## +imagePullPolicy: IfNotPresent + +## @param imagePullSecretName Name of Docker credentials secret for access to private repos +## Pre-populated with the pull secret name specified in the Anchore docs & quick start instructions +## Secrets must be manually created in the same namespace as release +## +imagePullSecretName: anchore-enterprise-pullcreds + +## @param startMigrationPod Spin up a Database migration pod to help migrate the database to the new schema +## +startMigrationPod: false + +## @param migrationPodImage The image reference to the migration pod +## +migrationPodImage: docker.io/postgres:13-bookworm + +## @param migrationAnchoreEngineSecretName The name of the secret that has anchore-engine values +## +migrationAnchoreEngineSecretName: my-engine-anchore-engine + +## @param serviceAccountName Name of a service account used to run all Anchore pods +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccountName: "" + +## @param injectSecretsViaEnv Enable secret injection into pod via environment variables instead of via k8s secrets +## Useful for injecting secrets directly into k8s pods from Hashicorp vault +## +injectSecretsViaEnv: false + +## @param licenseSecretName Name of the Kubernetes secret containing your license.yaml file +## This must be manually created. 
For example with the following command: +## `kubectl create secret generic anchore-enterprise-license --from-file=license.yaml=` +## +licenseSecretName: anchore-enterprise-license + +## @param certStoreSecretName Name of secret containing the certificates & keys used for SSL, SAML & CAs +## The chart will mount the secret specified in certStoreSecretName to /home/anchore/certs +## Secret must be manually created in the same namespace as release +## ref: https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets-as-files-from-a-pod +## +certStoreSecretName: "" + +## @param extraEnv Common environment variables set on all containers +## ref: https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/ +## +extraEnv: [] + +## @param useExistingSecrets forgoes secret creation and uses the secret defined in existingSecretName +## When useExistingSecrets is set to `true` the chart will not create secrets specifying the environment variables used in deployments. +## Instead, the chart will use secrets that have already been applied to the namespace that this chart is being deployed to. 
+## +useExistingSecrets: false + +## @param existingSecretName Name of an existing secret to be used for Anchore core services, excluding Anchore UI +## This secret must be created manually & should define the following environment variables: +## ANCHORE_ADMIN_PASSWORD +## ANCHORE_DB_PASSWORD +## ANCHORE_DB_USER +## ANCHORE_DB_NAME +## ANCHORE_DB_HOST +## ANCHORE_DB_PORT +## ANCHORE_SAML_SECRET (if applicable) +## ANCHORE_GITHUB_TOKEN (if applicable) +## +existingSecretName: anchore-enterprise-env + +## @param labels Common labels set on all Kubernetes resources +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +labels: {} + +## @param annotations Common annotations set on all Kubernetes resources +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +annotations: {} + +## @param nodeSelector Common nodeSelector set on all Kubernetes pods +## +nodeSelector: {} + +## @param tolerations Common tolerations set on all Kubernetes pods +## +tolerations: [] + +## @param affinity Common affinity set on all Kubernetes pods +## +affinity: {} + +## @param scratchVolume.mountPath The mount path of an external volume for scratch space. Used for the following pods: analyzer, policy-engine, catalog, and reports +## @param scratchVolume.fixGroupPermissions Enable an initContainer that will fix the fsGroup permissions on all scratch volumes +## @param scratchVolume.fixerInitContainerImage The image to use for the mode-fixer initContainer +## @param scratchVolume.details [object] Details for the k8s volume to be created (defaults to default emptyDir) +## Scratch volume details can be specified per component using the component's scratchVolume object (e.g. 
.Values.analyzer.scratchVolume) +## Generally speaking you need to provision 3x the size of the largest image (uncompressed) that you want to analyze +## ref: https://kubernetes.io/docs/concepts/storage/volumes/ +## +scratchVolume: + mountPath: /analysis_scratch + fixGroupPermissions: false + fixerInitContainerImage: alpine + details: {} + +## @param extraVolumes mounts additional volumes to each pod +## ref: https://kubernetes.io/docs/concepts/storage/volumes/ +## +extraVolumes: [] +# - name: config +# secret: +# secretName: config + +## @param extraVolumeMounts mounts additional volumes to each pod +## ref: https://kubernetes.io/docs/concepts/storage/volumes/ +## +extraVolumeMounts: [] +# - name: config +# mountPath: "/vault/secrets/config" +# subPath: config +# readOnly: true + +## @param securityContext.runAsUser The securityContext runAsUser for all Anchore pods +## @param securityContext.runAsGroup The securityContext runAsGroup for all Anchore pods +## @param securityContext.fsGroup The securityContext fsGroup for all Anchore pods +## By default the Anchore Enterprise images utilize the user/group 'anchore' using uid/gid 1000 +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod +## +securityContext: + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + +## @param containerSecurityContext The securityContext for all containers +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container +## +containerSecurityContext: {} + +## @param probes.liveness.initialDelaySeconds Initial delay seconds for liveness probe +## @param probes.liveness.timeoutSeconds Timeout seconds for liveness probe +## @param probes.liveness.periodSeconds Period seconds for liveness probe +## @param probes.liveness.failureThreshold Failure threshold for liveness probe +## @param probes.liveness.successThreshold Success threshold for liveness probe +## @param 
probes.readiness.timeoutSeconds Timeout seconds for the readiness probe +## @param probes.readiness.periodSeconds Period seconds for the readiness probe +## @param probes.readiness.failureThreshold Failure threshold for the readiness probe +## @param probes.readiness.successThreshold Success threshold for the readiness probe +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes +## +probes: + liveness: + initialDelaySeconds: 120 + timeoutSeconds: 10 + periodSeconds: 10 + failureThreshold: 6 + successThreshold: 1 + readiness: + timeoutSeconds: 10 + periodSeconds: 10 + failureThreshold: 3 + successThreshold: 1 + +## @param doSourceAtEntry.enabled Does a `source` of the file path defined before starting Anchore services +## @param doSourceAtEntry.filePaths List of file paths to `source` before starting Anchore services +## For example, if using hashicorp vault, set to /vault/secrets/config +## +doSourceAtEntry: + enabled: false + filePaths: [] + # - "/vault/secrets/config" + +## @param configOverride Allows for overriding the default Anchore configuration file +## This value can be used to pass in a custom configuration file for Anchore services using a block literal string +## This config file will be templated when it is added to the configmap, so Helm values and functions can be used +## +configOverride: "" + +## @param scripts [object] Collection of helper scripts usable in all anchore enterprise pods +## +scripts: + anchore-config: | + #!/bin/bash + while IFS= read -r line; do + while [[ "$line" =~ (\$\{[a-zA-Z_][a-zA-Z_0-9]*\}) ]]; do + VAR_NAME=${BASH_REMATCH[1]#*\{}; VAR_NAME=${VAR_NAME%\}}; + line=${line//${BASH_REMATCH[1]}/${!VAR_NAME}}; + done; + printf '%s\n' "$line"; + done < /config/config.yaml + +##################################################################### +## @section Anchore Configuration Parameters +## Params used for all Anchore Enterprise service configuration files 
+##################################################################### + +anchoreConfig: + ## @param anchoreConfig.service_dir Path to directory where default Anchore config files are placed at startup + ## This path must be a writable location for the pod + ## + service_dir: /anchore_service + + ## @param anchoreConfig.log_level The log level for Anchore services + ## options available: FATAL, ERROR, WARN, INFO, DEBUG, SPEW + ## + log_level: INFO + + ## @param anchoreConfig.allow_awsecr_iam_auto Enable AWS IAM instance role for ECR auth + ## When set, if a registry credential username is set to 'iamauto' for an ecr registry, the engine will + ## use whatever aws creds are available in the standard boto search path (.aws, env, etc) + ## + allow_awsecr_iam_auto: true + + ## @param anchoreConfig.keys.secret The shared secret used for signing & encryption, auto-generated by Helm if not set. + ## If using useExistingSecrets=true, this can be set with the env var ANCHORE_SAML_SECRET + ## @param anchoreConfig.keys.privateKeyFileName The file name of the private key used for signing & encryption, found in the k8s secret specified in .Values.certStoreSecretName + ## @param anchoreConfig.keys.publicKeyFileName The file name of the public key used for signing & encryption, found in the k8s secret specified in .Values.certStoreSecretName + ## Only one of anchoreConfig.keys.secret or anchoreConfig.keys.privateKeyFileName/anchoreConfig.keys.publicKeyFileName should be configured. + ## If all are set then the keys take precedence over the secret value. 
+ ## + keys: + secret: "" + privateKeyFileName: "" + publicKeyFileName: "" + + ## @param anchoreConfig.user_authentication.oauth.enabled Enable OAuth for Anchore user authentication + ## @param anchoreConfig.user_authentication.oauth.default_token_expiration_seconds The expiration, in seconds, for OAuth tokens + ## @param anchoreConfig.user_authentication.oauth.refresh_token_expiration_seconds The expiration, in seconds, for OAuth refresh tokens + ## @param anchoreConfig.user_authentication.allow_api_keys_for_saml_users Enable API key generation and authentication for SAML users + ## NOTE: Enterprise cannot automatically revoke or delete keys for disabled SAML users, the admin must revoke all keys assigned to a SAML user after disabling that user. + ## @param anchoreConfig.user_authentication.max_api_key_age_days The maximum age, in days, for API keys + ## @param anchoreConfig.user_authentication.max_api_keys_per_user The maximum number of API keys per user + ## @param anchoreConfig.user_authentication.remove_deleted_user_api_keys_older_than_days The number of days elapsed after a user API key is deleted before it is garbage collected (-1 to disable) + ## ref: https://docs.anchore.com/current/docs/installation/configuration/user_credential_storage/#bearer-tokensoauth2 + ## + ## @param anchoreConfig.user_authentication.hashed_passwords Enable storing passwords as secure hashes in the database + ## This can dramatically increase CPU usage if you don't also use OAuth and tokens for internal communications + ## WARNING: You should not change this after a system has been initialized as it may cause a mismatch in existing passwords + ## ref: https://docs.anchore.com/current/docs/installation/configuration/user_credential_storage/#configuring-hashed-passwords-and-oauth + ## + ## @param anchoreConfig.user_authentication.sso_require_existing_users set to true in order to disable the SSO JIT provisioning during authentication + ## This provides an additional layer of 
security and configuration for SSO users to gain access to Anchore. + ## + user_authentication: + oauth: + enabled: true + default_token_expiration_seconds: 3600 + refresh_token_expiration_seconds: 86400 + allow_api_keys_for_saml_users: false + max_api_key_age_days: 365 + max_api_keys_per_user: 100 + hashed_passwords: true + sso_require_existing_users: false + remove_deleted_user_api_keys_older_than_days: 365 + + ## @param anchoreConfig.metrics.enabled Enable Prometheus metrics for all Anchore services + ## @param anchoreConfig.metrics.auth_disabled Disable auth on Prometheus metrics for all Anchore services + ## + metrics: + enabled: false + auth_disabled: false + + ## @param anchoreConfig.webhooks Enable Anchore services to provide webhooks for external system updates + ## Used to configure an endpoint for general notification delivery. These events are image/tag updates etc + ## This is globally configured and updates for all users are sent to the same host but with a different path for each user + ## / are required as documented at end of URI - only hostname:port should be configured + ## ref: https://docs.anchore.com/current/docs/using/cli_usage/subscriptions/#webhook-configuration + ## + webhooks: {} + # webhook_user: 'user' + # webhook_pass: 'pass' + # ssl_verify: true + # general: {} + # url: "http://somehost:9090//" + # policy_eval: {} + # event_log: {} + + ## @param anchoreConfig.default_admin_password The password for the Anchore Enterprise admin user + ## This value is only used during creation of the admin user, cannot be used to change the password + ## + default_admin_password: "" + + ## @param anchoreConfig.default_admin_email The email address used for the Anchore Enterprise admin user + ## This value is only used during creation of the admin user, cannot be used to change the email address + ## + default_admin_email: "admin@myanchore" + + ## @param anchoreConfig.database.timeout + ## @param anchoreConfig.database.ssl Enable SSL/TLS for the 
database connection + ## @param anchoreConfig.database.sslMode The SSL mode to use for database connection + ## @param anchoreConfig.database.sslRootCertFileName File name of the database root CA certificate stored in the k8s secret specified with .Values.certStoreSecretName + ## @param anchoreConfig.database.db_pool_size The database max connection pool size + ## @param anchoreConfig.database.db_pool_max_overflow The maximum overflow size of the database connection pool + ## @param anchoreConfig.database.engineArgs Set custom database engine arguments for SQLAlchemy + ## ref: https://docs.sqlalchemy.org/en/14/core/engines.html#engine-creation-api + ## + database: + timeout: 120 + ssl: false + sslMode: verify-full + sslRootCertFileName: "" + db_pool_size: 30 + db_pool_max_overflow: 100 + engineArgs: {} + + ## @param anchoreConfig.internalServicesSSL.enabled Force all Enterprise services to use SSL for internal communication + ## @param anchoreConfig.internalServicesSSL.verifyCerts Enable cert verification against the local cert bundle, if this set to false self-signed certs are allowed + ## @param anchoreConfig.internalServicesSSL.certSecretKeyFileName File name of the private key used for internal SSL stored in the secret specified in .Values.certStoreSecretName + ## @param anchoreConfig.internalServicesSSL.certSecretCertFileName File name of the root CA certificate used for internal SSL stored in the secret specified in .Values.certStoreSecretName + ## ref: https://docs.anchore.com/current/docs/installation/configuration/tls_ssl_config/ + ## + internalServicesSSL: + enabled: false + verifyCerts: false + certSecretKeyFileName: "" + certSecretCertFileName: "" + + ## @param anchoreConfig.policyBundles Include custom Anchore policy bundles + ## This object represents the data of a configmap that is mounted to /policies of all Anchore services + ## ref: https://docs.anchore.com/current/docs/overview/concepts/policy/bundles/ + ## + policyBundles: {} + # 
custom_policy_bundle1.json: | + # { + # "id": "custom1", + # "version": "1_0", + # "name": "My custom bundle", + # "comment": "My system's custom bundle", + # "whitelisted_images": [], + # "blacklisted_images": [], + # "mappings": [], + # "whitelists": [], + # "policies": [] + # } + # custom_policy_bundle2.json: | + # { + # .... + # } + + apiext: + ## TODO - get better documentation for the 'external' option + ## @param anchoreConfig.apiext.external.enabled Allow overrides for constructing Anchore API URLs + ## @param anchoreConfig.apiext.external.useTLS Enable TLS for external API access + ## @param anchoreConfig.apiext.external.hostname Hostname for the external Anchore API + ## @param anchoreConfig.apiext.external.port Port configured for external Anchore API + ## + external: + enabled: false + useTLS: true + hostname: "" + port: 8443 + + analyzer: + ## @param anchoreConfig.analyzer.cycle_timers.image_analyzer The interval between checks of the work queue for new analysis jobs + ## + cycle_timers: + image_analyzer: 1 + + ## @param anchoreConfig.analyzer.layer_cache_max_gigabytes Specify a cache size > 0GB to enable image layer caching + ## This chart sets up a scratch directory for all Anchore Analyzer pods using the values found at .Values.scratchVolume + ## When setting .Values.anchoreConfig.analyzer.layer_cache_max_gigabytes, ensure the scratch volume has sufficient storage space + ## ref: https://docs.anchore.com/current/docs/installation/storage/layer_caching/ + ## + layer_cache_max_gigabytes: 0 + + ## @param anchoreConfig.analyzer.enable_hints Enable a user-supplied 'hints' file to override and/or augment the software artifacts found during analysis + ## Once enabled, the Anchore Analyzer services will look for a file with a specific name, location and format located within the container image - /anchore_hints.json + ## ref: https://docs.anchore.com/current/docs/installation/configuration/content_hints/ + ## + enable_hints: false + + ## @param 
anchoreConfig.analyzer.configFile [object] Custom Anchore Analyzer configuration file contents in YAML + ## This configuration file is used to configure regex matching, file content searching and malware scanning + ## ref: https://docs.anchore.com/current/docs/overview/concepts/policy/policy_checks/#gate-files + ## ref: https://docs.anchore.com/current/docs/overview/concepts/images/analysis/malware_scanning/ + ## + configFile: + # Anchore Analyzer config file + ## WARNING - malforming this file can cause the Anchore Analyzer to fail on all image analysis + # + retrieve_files: + file_list: + - '/etc/passwd' + # - '/etc/services' + # - '/etc/sudoers' + secret_search: + match_params: + - MAXFILESIZE=10000 + - STOREONMATCH=n + regexp_match: + - "AWS_ACCESS_KEY=(?i).*aws_access_key_id( *=+ *).*(?:/v2/reports/graphql in a browser for the reports API + ## ref: https://docs.anchore.com/current/docs/using/api_usage/reports/ + ## + enable_graphiql: true + + ## @param anchoreConfig.reports.async_execution_timeout Configure how long a scheduled query must be running for before it is considered timed out + ## This may need to be adjusted if the system has large amounts of data and reports are being prematurely timed out. 
+ ## The value should be a number followed by "w", "d", or "h" to represent weeks, days or hours + async_execution_timeout: 48h + + ## @param anchoreConfig.reports.cycle_timers.reports_scheduled_queries Interval in seconds to check for scheduled queries that need to be run + ## + cycle_timers: + reports_scheduled_queries: 600 + + ## @param anchoreConfig.reports.use_volume Configure the reports service to buffer report generation to disk instead of in memory + ## This should be configured in production systems with large amounts of data (10s of thousands of images or more) + ## Generally speaking you need to provision 2x the size of the largest report that you expect to generate + ## We recommend utilizing an ephemeral PVC for your scratch volume, this makes it easy to provision enough storage for large reports + ## Scratch volume can be configured using .Values.reports.scratchVolume.details object + ## + use_volume: false + + reports_worker: + ## @param anchoreConfig.reports_worker.enable_data_ingress Enable periodically syncing data into the Anchore Reports Service + ## + enable_data_ingress: true + + ## @param anchoreConfig.reports_worker.enable_data_egress Periodically remove reporting data that has been removed in other parts of system + ## + enable_data_egress: false + + ## @param anchoreConfig.reports_worker.data_egress_window defines a number of days to keep reporting data following its deletion in the rest of system. + ## Default value of 0 will remove it on next task run + ## + data_egress_window: 0 + + ## @param anchoreConfig.reports_worker.data_refresh_max_workers The maximum number of concurrent threads to refresh existing results (etl vulnerabilities and evaluations) in reports service. + ## + data_refresh_max_workers: 10 + + ## @param anchoreConfig.reports_worker.data_load_max_workers The maximum number of concurrent threads to load new results (etl vulnerabilities and evaluations) to reports service. 
+ ## + data_load_max_workers: 10 + + ## @param anchoreConfig.reports_worker.cycle_timers.reports_image_load Interval that vulnerabilities for images are synced + ## @param anchoreConfig.reports_worker.cycle_timers.reports_tag_load Interval that vulnerabilities by tags are synced + ## @param anchoreConfig.reports_worker.cycle_timers.reports_runtime_inventory_load Interval that the runtime inventory is synced + ## @param anchoreConfig.reports_worker.cycle_timers.reports_extended_runtime_vuln_load Interval extended runtime reports are synced (ecs, k8s containers and namespaces) + ## @param anchoreConfig.reports_worker.cycle_timers.reports_image_refresh Interval that images are refreshed + ## @param anchoreConfig.reports_worker.cycle_timers.reports_tag_refresh Interval that tags are refreshed + ## @param anchoreConfig.reports_worker.cycle_timers.reports_metrics Interval for how often reporting metrics are generated + ## @param anchoreConfig.reports_worker.cycle_timers.reports_image_egress Interval stale states are removed by image + ## @param anchoreConfig.reports_worker.cycle_timers.reports_tag_egress Interval stale states are removed by tag + ## + cycle_timers: + reports_image_load: 600 + reports_tag_load: 600 + reports_runtime_inventory_load: 600 + reports_extended_runtime_vuln_load: 1800 + reports_image_refresh: 7200 + reports_tag_refresh: 7200 + reports_metrics: 3600 + reports_image_egress: 600 + reports_tag_egress: 600 + + ui: + ## @param anchoreConfig.ui.enable_proxy Trust a reverse proxy when setting secure cookies (via the `X-Forwarded-Proto` header) + ## + enable_proxy: false + + ## @param anchoreConfig.ui.enable_ssl Enable SSL in the Anchore UI container + ## + enable_ssl: false + + ## @param anchoreConfig.ui.enable_shared_login Allow single user to start multiple Anchore UI sessions + ## When set to `false`, only one session per credential is permitted at a time, and logging in will invalidate any other + ## sessions that are using the same set of 
credentials. + ## + enable_shared_login: true + + ## @param anchoreConfig.ui.redis_flushdb Flush user session keys and empty data on Anchore UI startup + ## If the datastore is flushed, any users with active sessions will be required to re-authenticate + ## + redis_flushdb: true + + ## @param anchoreConfig.ui.force_websocket Force WebSocket protocol for socket message communications + ## + force_websocket: false + + ## @param anchoreConfig.ui.authentication_lock.count Number of failed authentication attempts allowed before a temporary lock is applied + ## @param anchoreConfig.ui.authentication_lock.expires Authentication lock duration + ## + authentication_lock: + count: 5 + expires: 300 + + ## @param anchoreConfig.ui.custom_links List of up to 10 external links provided + ## Each link entry must have a title of greater than 0-length and a valid URI. If either item is invalid, the entry will be excluded. + ## + custom_links: {} + # title: Custom External Links + # links: + # - title: Example Link 1 + # uri: https://example.com + + ## @param anchoreConfig.ui.enable_add_repositories Specify what users can add image repositories to the Anchore UI + ## + enable_add_repositories: {} + # admin: True + # standard: True + + ## @param anchoreConfig.ui.log_level Descriptive detail of the application log output + ## valid values are error, warn, info, http, debug + ## + log_level: http + + ## @param anchoreConfig.ui.enrich_inventory_view aggregate and include compliance and vulnerability data from the reports service. + ## + enrich_inventory_view: true + + ## @param anchoreConfig.ui.appdb_config.native toggle the postgreSQL drivers used to connect to the database between the native and the NodeJS drivers. 
+ ## @param anchoreConfig.ui.appdb_config.pool.max maximum number of simultaneous connections allowed in the connection pool + ## @param anchoreConfig.ui.appdb_config.pool.min minimum number of connections + ## @param anchoreConfig.ui.appdb_config.pool.acquire the timeout in milliseconds used when acquiring a new connection + ## @param anchoreConfig.ui.appdb_config.pool.idle the maximum time that a connection can be idle before being released + ## + appdb_config: + native: true + pool: + max: 10 + min: 0 + acquire: 30000 + idle: 10000 + + ## @param anchoreConfig.ui.dbUser allows overriding and separation of the ui database user. + ## The UI config defaults to postgresql.auth.username if not set + ## + dbUser: "" + + ## @param anchoreConfig.ui.dbPassword allows overriding and separation of the ui database user authentication + ## The UI config defaults to postgresql.auth.password if not set + ## + dbPassword: "" + +###################################################### +## @section Anchore Analyzer k8s Deployment Parameters +###################################################### +analyzer: + ## @param analyzer.replicaCount Number of replicas for the Anchore Analyzer deployment + ## + replicaCount: 1 + + ## @param analyzer.service.port The port used for gatherings metrics when .Values.metricsEnabled=true + ## + service: + port: 8084 + + ## @param analyzer.extraEnv Set extra environment variables for Anchore Analyzer pods + ## + extraEnv: [] + + ## @param analyzer.resources Resource requests and limits for Anchore Analyzer pods + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + ## Commented values below are just a suggested baseline. Contact Anchore support for deployment specific recommendations. 
+ ## + resources: {} + # requests: + # cpu: 1000m + # memory: 8000Mi + # limits: + # memory: 8000Mi + + ## @param analyzer.labels Labels for Anchore Analyzer pods + ## + labels: {} + + ## @param analyzer.annotations Annotation for Anchore Analyzer pods + ## + annotations: {} + + ## @param analyzer.nodeSelector Node labels for Anchore Analyzer pod assignment + ## + nodeSelector: {} + + ## @param analyzer.tolerations Tolerations for Anchore Analyzer pod assignment + ## + tolerations: [] + + ## @param analyzer.affinity Affinity for Anchore Analyzer pod assignment + ## + affinity: {} + + ## @param analyzer.serviceAccountName Service account name for Anchore Analyzer pods + ## + serviceAccountName: "" + + ## @param analyzer.scratchVolume.details [object] Details for the k8s volume to be created for Anchore Analyzer scratch space + ## + scratchVolume: + details: {} + +################################################# +## @section Anchore API k8s Deployment Parameters +################################################# +api: + ## @param api.replicaCount Number of replicas for Anchore API deployment + ## + replicaCount: 1 + + ## @param api.service.type Service type for Anchore API + ## @param api.service.port Service port for Anchore API + ## @param api.service.annotations Annotations for Anchore API service + ## @param api.service.labels Labels for Anchore API service + ## @param api.service.nodePort nodePort for Anchore API service + ## + service: + type: ClusterIP + port: 8228 + annotations: {} + labels: {} + nodePort: "" + + ## @param api.extraEnv Set extra environment variables for Anchore API pods + ## + extraEnv: [] + + ## @param api.extraVolumes Define additional volumes for Anchore API pods + ## + extraVolumes: [] + + ## @param api.extraVolumeMounts Define additional volume mounts for Anchore API pods + ## + extraVolumeMounts: [] + + ## @param api.resources Resource requests and limits for Anchore API pods + ## ref: 
https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + ## Commented values below are just a suggested baseline. Contact Anchore support for deployment specific recommendations. + ## + resources: {} + # requests: + # cpu: 500m + # memory: 1000Mi + # limits: + # memory: 1000Mi + + ## @param api.labels Labels for Anchore API pods + ## + labels: {} + + ## @param api.annotations Annotation for Anchore API pods + ## + annotations: {} + + ## @param api.nodeSelector Node labels for Anchore API pod assignment + ## + nodeSelector: {} + + ## @param api.tolerations Tolerations for Anchore API pod assignment + ## + tolerations: [] + + ## @param api.affinity Affinity for Anchore API pod assignment + ## + affinity: {} + + ## @param api.serviceAccountName Service account name for Anchore API pods + ## + serviceAccountName: "" + +##################################################### +## @section Anchore Catalog k8s Deployment Parameters +##################################################### +catalog: + ## @param catalog.replicaCount Number of replicas for the Anchore Catalog deployment + ## + replicaCount: 1 + + ## @param catalog.service.type Service type for Anchore Catalog + ## @param catalog.service.port Service port for Anchore Catalog + ## @param catalog.service.annotations Annotations for Anchore Catalog service + ## @param catalog.service.labels Labels for Anchore Catalog service + ## @param catalog.service.nodePort nodePort for Anchore Catalog service + ## + service: + type: ClusterIP + port: 8082 + annotations: {} + labels: {} + nodePort: "" + + ## @param catalog.extraEnv Set extra environment variables for Anchore Catalog pods + ## + extraEnv: [] + + ## @param catalog.extraVolumes Define additional volumes for Anchore Catalog pods + ## + extraVolumes: [] + + ## @param catalog.extraVolumeMounts Define additional volume mounts for Anchore Catalog pods + ## + extraVolumeMounts: [] + + ## @param catalog.resources Resource requests and limits for 
Anchore Catalog pods + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + ## Commented values below are just a suggested baseline. Contact Anchore support for deployment specific recommendations. + ## + resources: {} + # requests: + # cpu: 1000m + # memory: 8000Mi + # limits: + # memory: 8000Mi + + ## @param catalog.labels Labels for Anchore Catalog pods + ## + labels: {} + + ## @param catalog.annotations Annotation for Anchore Catalog pods + ## + annotations: {} + + ## @param catalog.nodeSelector Node labels for Anchore Catalog pod assignment + ## + nodeSelector: {} + + ## @param catalog.tolerations Tolerations for Anchore Catalog pod assignment + ## + tolerations: [] + + ## @param catalog.affinity Affinity for Anchore Catalog pod assignment + ## + affinity: {} + + ## @param catalog.serviceAccountName Service account name for Anchore Catalog pods + ## + serviceAccountName: "" + + ## @param catalog.scratchVolume.details [object] Details for the k8s volume to be created for Anchore Catalog scratch space + ## + scratchVolume: + details: {} + +########################################## +## @section Anchore Feeds Chart Parameters +########################################## +feeds: + ## @param feeds.chartEnabled Enable the Anchore Feeds chart + ## + chartEnabled: true + + ## @param feeds.standalone Sets the Anchore Feeds chart to run into non-standalone mode, for use with Anchore Enterprise. + ## This should never be set to true when chartEnabled=true. + ## + standalone: false + + ## @param feeds.url Set the URL for a standalone Feeds service. Use when chartEnabled=false. + ## + url: "" + + ## @param feeds.resources Resource requests and limits for Anchore Feeds pods + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + ## Commented values below are just a suggested baseline. Contact Anchore support for deployment specific recommendations. 
+ ## + resources: {} + # requests: + # cpu: 2500m + # memory: 10Gi + # limits: + # memory: 10Gi + + +############################################ +## @section Anchore Notifications Parameters +############################################ +notifications: + ## @param notifications.replicaCount Number of replicas for the Anchore Notifications deployment + ## + replicaCount: 1 + + ## @param notifications.service.type Service type for Anchore Notifications + ## @param notifications.service.port Service port for Anchore Notifications + ## @param notifications.service.annotations Annotations for Anchore Notifications service + ## @param notifications.service.labels Labels for Anchore Notifications service + ## @param notifications.service.nodePort nodePort for Anchore Notifications service + ## + service: + type: ClusterIP + port: 8668 + annotations: {} + labels: {} + nodePort: "" + + ## @param notifications.extraEnv Set extra environment variables for Anchore Notifications pods + ## + extraEnv: [] + + ## @param notifications.extraVolumes Define additional volumes for Anchore Notifications pods + ## + extraVolumes: [] + + ## @param notifications.extraVolumeMounts Define additional volume mounts for Anchore Notifications pods + ## + extraVolumeMounts: [] + + ## @param notifications.resources Resource requests and limits for Anchore Notifications pods + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + ## Commented values below are just a suggested baseline. Contact Anchore support for deployment specific recommendations. 
+ ## + resources: {} + # requests: + # cpu: 100m + # memory: 500Mi + # limits: + # memory: 500Mi + + ## @param notifications.labels Labels for Anchore Notifications pods + ## + labels: {} + + ## @param notifications.annotations Annotation for Anchore Notifications pods + ## + annotations: {} + + ## @param notifications.nodeSelector Node labels for Anchore Notifications pod assignment + ## + nodeSelector: {} + + ## @param notifications.tolerations Tolerations for Anchore Notifications pod assignment + ## + tolerations: [] + + ## @param notifications.affinity Affinity for Anchore Notifications pod assignment + ## + affinity: {} + + ## @param notifications.serviceAccountName Service account name for Anchore Notifications pods + ## + serviceAccountName: "" + +########################################################### +## @section Anchore Policy Engine k8s Deployment Parameters +########################################################### +policyEngine: + ## @param policyEngine.replicaCount Number of replicas for the Anchore Policy Engine deployment + ## + replicaCount: 1 + + ## @param policyEngine.service.type Service type for Anchore Policy Engine + ## @param policyEngine.service.port Service port for Anchore Policy Engine + ## @param policyEngine.service.annotations Annotations for Anchore Policy Engine service + ## @param policyEngine.service.labels Labels for Anchore Policy Engine service + ## @param policyEngine.service.nodePort nodePort for Anchore Policy Engine service + ## + service: + type: ClusterIP + port: 8087 + annotations: {} + labels: {} + nodePort: "" + + ## @param policyEngine.extraEnv Set extra environment variables for Anchore Policy Engine pods + ## + extraEnv: [] + + ## @param policyEngine.extraVolumes Define additional volumes for Anchore Policy Engine pods + ## + extraVolumes: [] + + ## @param policyEngine.extraVolumeMounts Define additional volume mounts for Anchore Policy Engine pods + ## + extraVolumeMounts: [] + + ## @param 
policyEngine.resources Resource requests and limits for Anchore Policy Engine pods + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + ## Commented values below are just a suggested baseline. Contact Anchore support for deployment specific recommendations. + ## + resources: {} + # requests: + # cpu: 2000m + # memory: 8000Mi + # limits: + # memory: 8000Mi + + ## @param policyEngine.labels Labels for Anchore Policy Engine pods + ## + labels: {} + + ## @param policyEngine.annotations Annotation for Anchore Policy Engine pods + ## + annotations: {} + + ## @param policyEngine.nodeSelector Node labels for Anchore Policy Engine pod assignment + ## + nodeSelector: {} + + ## @param policyEngine.tolerations Tolerations for Anchore Policy Engine pod assignment + ## + tolerations: [] + + ## @param policyEngine.affinity Affinity for Anchore Policy Engine pod assignment + ## + affinity: {} + + ## @param policyEngine.serviceAccountName Service account name for Anchore Policy Engine pods + ## + serviceAccountName: "" + + ## @param policyEngine.scratchVolume.details [object] Details for the k8s volume to be created for Anchore Policy Engine scratch space + ## + scratchVolume: + details: {} + +######################################## +## @section Anchore Reports Parameters +######################################## +reports: + ## @param reports.replicaCount Number of replicas for the Anchore Reports deployment + ## + replicaCount: 1 + + ## @param reports.service.type Service type for Anchore Reports + ## @param reports.service.port Service port for Anchore Reports + ## @param reports.service.annotations Annotations for Anchore Reports service + ## @param reports.service.labels Labels for Anchore Reports service + ## @param reports.service.nodePort nodePort for Anchore Reports service + ## + service: + type: ClusterIP + port: 8558 + annotations: {} + labels: {} + nodePort: "" + + ## @param reports.extraEnv Set extra environment variables for 
Anchore Reports pods + ## + extraEnv: [] + + ## @param reports.extraVolumes Define additional volumes for Anchore Reports pods + ## + extraVolumes: [] + + ## @param reports.extraVolumeMounts Define additional volume mounts for Anchore Reports pods + ## + extraVolumeMounts: [] + + ## @param reports.resources Resource requests and limits for Anchore Reports pods + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + ## Commented values below are just a suggested baseline. Contact Anchore support for deployment specific recommendations + ## NOTE: the commented resources below are assuming the use of a scratch volume with `anchoreConfig.reports.use_volume=true` + ## If not using a scratch volume, the memory resources may need to be increased. + ## + resources: {} + # requests: + # memory: 2000Mi + # cpu: 2000m + # limits: + # memory: 2000Mi + + ## @param reports.labels Labels for Anchore Reports pods + ## + labels: {} + + ## @param reports.annotations Annotation for Anchore Reports pods + ## + annotations: {} + + ## @param reports.nodeSelector Node labels for Anchore Reports pod assignment + ## + nodeSelector: {} + + ## @param reports.tolerations Tolerations for Anchore Reports pod assignment + ## + tolerations: [] + + ## @param reports.affinity Affinity for Anchore Reports pod assignment + ## + affinity: {} + + ## @param reports.serviceAccountName Service account name for Anchore Reports pods + ## + serviceAccountName: "" + + ## @param reports.scratchVolume.details [object] Details for the k8s volume to be created for Anchore Reports scratch space + ## ref: https://kubernetes.io/docs/concepts/storage/volumes/ + ## + scratchVolume: + details: {} + # ephemeral: + # volumeClaimTemplate: + # spec: + # accessModes: + # - ReadWriteOnce + # resources: + # requests: + # storage: 100Gi + # storageClassName: "" + +############################################# +## @section Anchore Reports Worker Parameters 
+############################################# +reportsWorker: + ## @param reportsWorker.replicaCount Number of replicas for the Anchore Reports Worker deployment + ## + replicaCount: 1 + + ## @param reportsWorker.service.type Service type for Anchore Reports Worker + ## @param reportsWorker.service.port Service port for Anchore Reports Worker + ## @param reportsWorker.service.annotations Annotations for Anchore Reports Worker service + ## @param reportsWorker.service.labels Labels for Anchore Reports Worker service + ## @param reportsWorker.service.nodePort nodePort for Anchore Reports Worker service + ## + service: + type: ClusterIP + port: 8559 + annotations: {} + labels: {} + nodePort: "" + + ## @param reportsWorker.extraEnv Set extra environment variables for Anchore Reports Worker pods + ## + extraEnv: [] + + ## @param reportsWorker.extraVolumes Define additional volumes for Anchore Reports Worker pods + ## + extraVolumes: [] + + ## @param reportsWorker.extraVolumeMounts Define additional volume mounts for Anchore Reports Worker pods + ## + extraVolumeMounts: [] + + ## @param reportsWorker.resources Resource requests and limits for Anchore Reports Worker pods + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + ## Commented values below are just a suggested baseline. Contact Anchore support for deployment specific recommendations. 
+ ## + resources: {} + # requests: + # memory: 1000Mi + # cpu: 2000m + # limits: + # memory: 1000Mi + + ## @param reportsWorker.labels Labels for Anchore Reports Worker pods + ## + labels: {} + + ## @param reportsWorker.annotations Annotation for Anchore Reports Worker pods + ## + annotations: {} + + ## @param reportsWorker.nodeSelector Node labels for Anchore Reports Worker pod assignment + ## + nodeSelector: {} + + ## @param reportsWorker.tolerations Tolerations for Anchore Reports Worker pod assignment + ## + tolerations: [] + + ## @param reportsWorker.affinity Affinity for Anchore Reports Worker pod assignment + ## + affinity: {} + + ## @param reportsWorker.serviceAccountName Service account name for Anchore Reports Worker pods + ## + serviceAccountName: "" + +########################################### +## @section Anchore Simple Queue Parameters +########################################### +simpleQueue: + ## @param simpleQueue.replicaCount Number of replicas for the Anchore Simple Queue deployment + ## + replicaCount: 1 + + ## @param simpleQueue.service.type Service type for Anchore Simple Queue + ## @param simpleQueue.service.port Service port for Anchore Simple Queue + ## @param simpleQueue.service.annotations Annotations for Anchore Simple Queue service + ## @param simpleQueue.service.labels Labels for Anchore Simple Queue service + ## @param simpleQueue.service.nodePort nodePort for Anchore Simple Queue service + ## + service: + type: ClusterIP + port: 8083 + annotations: {} + labels: {} + nodePort: "" + + ## @param simpleQueue.extraEnv Set extra environment variables for Anchore Simple Queue pods + ## + extraEnv: [] + + ## @param simpleQueue.extraVolumes Define additional volumes for Anchore Simple Queue pods + ## + extraVolumes: [] + + ## @param simpleQueue.extraVolumeMounts Define additional volume mounts for Anchore Simple Queue pods + ## + extraVolumeMounts: [] + + ## @param simpleQueue.resources Resource requests and limits for Anchore Simple Queue 
pods + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + ## Commented values below are just a suggested baseline. Contact Anchore support for deployment specific recommendations. + ## + resources: {} + # requests: + # cpu: 250m + # memory: 1000Mi + # limits: + # memory: 1000Mi + + ## @param simpleQueue.labels Labels for Anchore Simple Queue pods + ## + labels: {} + + ## @param simpleQueue.annotations Annotation for Anchore Simple Queue pods + ## + annotations: {} + + ## @param simpleQueue.nodeSelector Node labels for Anchore Simple Queue pod assignment + ## + nodeSelector: {} + + ## @param simpleQueue.tolerations Tolerations for Anchore Simple Queue pod assignment + ## + tolerations: [] + + ## @param simpleQueue.affinity Affinity for Anchore Simple Queue pod assignment + ## + affinity: {} + + ## @param simpleQueue.serviceAccountName Service account name for Anchore Simple Queue pods + ## + serviceAccountName: "" + +################################# +## @section Anchore UI Parameters +################################# +ui: + ## @param ui.image Image used for the Anchore UI container + ## + image: docker.io/anchore/enterprise-ui:v5.4.0 + + ## @param ui.imagePullPolicy Image pull policy for Anchore UI image + ## + imagePullPolicy: IfNotPresent + + ## @param ui.existingSecretName Name of an existing secret to be used for Anchore UI DB and Redis endpoints + ## This secret should define the following ENV vars + ## ANCHORE_APPDB_URI + ## ANCHORE_REDIS_URI + ## + # Set the name of your existing secret for the Anchore Enterprise UI + existingSecretName: anchore-enterprise-ui-env + + ## @param ui.ldapsRootCaCertName Name of the custom CA certificate file store in `.Values.certStoreSecretName` + ## + ldapsRootCaCertName: "" + + ## @param ui.service.type Service type for Anchore UI + ## @param ui.service.port Service port for Anchore UI + ## @param ui.service.annotations Annotations for Anchore UI service + ## @param ui.service.labels 
Labels for Anchore UI service + ## @param ui.service.sessionAffinity Session Affinity for Ui service + ## @param ui.service.nodePort nodePort for Anchore UI service + ## + service: + type: ClusterIP + port: 80 + ## TODO - add service specific annotations & labels to ALL services + annotations: {} + labels: {} + sessionAffinity: ClientIP + nodePort: "" + + + ## @param ui.extraEnv Set extra environment variables for Anchore UI pods + ## + extraEnv: [] + + ## @param ui.extraVolumes Define additional volumes for Anchore UI pods + ## + extraVolumes: [] + + ## @param ui.extraVolumeMounts Define additional volume mounts for Anchore UI pods + ## + extraVolumeMounts: [] + + ## @param ui.resources Resource requests and limits for Anchore UI pods + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + ## Commented values below are just a suggested baseline. Contact Anchore support for deployment specific recommendations. + ## + resources: {} + # requests: + # cpu: 100m + # memory: 5000Mi + # limits: + # memory: 5000Mi + + ## @param ui.labels Labels for Anchore UI pods + ## + labels: {} + + ## @param ui.annotations Annotation for Anchore UI pods + ## + annotations: {} + + ## @param ui.nodeSelector Node labels for Anchore UI pod assignment + ## + nodeSelector: {} + + ## @param ui.tolerations Tolerations for Anchore UI pod assignment + ## + tolerations: [] + + ## @param ui.affinity Affinity for Anchore ui pod assignment + ## + affinity: {} + + ## @param ui.serviceAccountName Service account name for Anchore UI pods + ## + serviceAccountName: "" + +############################################ +## @section Anchore Upgrade Job Parameters +## Upgrade job uses a Helm post-install-hook +############################################ +upgradeJob: + ## @param upgradeJob.enabled Enable the Anchore Enterprise database upgrade job + ## + enabled: true + + ## @param upgradeJob.force Force the Anchore Feeds database upgrade job to run as a regular job instead 
of as a Helm hook + ## + force: false + + ## @param upgradeJob.rbacCreate Create RBAC resources for the Anchore upgrade job + ## By default, the anchore upgrade job utilizes a service account that will be created to call kubectl to scale down the deployment before running the upgrade job. + ## The service account is granted deployment, deployment/scale, and pod permissions. See templates/hooks/pre-upgrade/db-upgrade-rbac.yaml for the full list of permissions + ## + rbacCreate: true + + ## @param upgradeJob.serviceAccountName Use an existing service account for the Anchore upgrade job + ## + serviceAccountName: "" + + ## @param upgradeJob.usePostUpgradeHook Use a Helm post-upgrade hook to run the upgrade job instead of the default pre-upgrade hook. This job does not require creating RBAC resources. + ## Uses the same mechanism for upgrades as the legacy anchore-engine chart. Not compatible with `helm upgrade --wait` or ArgoCD. + ## + usePostUpgradeHook: false + + ## @param upgradeJob.kubectlImage The image to use for the upgrade job's init container that uses kubectl to scale down deployments before an upgrade + ## This is only used in the preupgrade job. 
+ ## + kubectlImage: bitnami/kubectl:1.27 + + ## @param upgradeJob.nodeSelector Node labels for the Anchore upgrade job pod assignment + ## + nodeSelector: {} + + ## @param upgradeJob.tolerations Tolerations for the Anchore upgrade job pod assignment + ## + tolerations: [] + + ## @param upgradeJob.affinity Affinity for the Anchore upgrade job pod assignment + ## + affinity: {} + + ## @param upgradeJob.annotations Annotations for the Anchore upgrade job + ## + annotations: {} + + ## @param upgradeJob.resources Resource requests and limits for the Anchore upgrade job + ## + resources: {} + + ## @param upgradeJob.labels Labels for the Anchore upgrade job + ## + labels: {} + + ## @param upgradeJob.ttlSecondsAfterFinished The time period in seconds the upgrade job, and its related pods should be retained for + ## Defaults to 0 == immediate deletion after completion + ## Set this to -1 to disable deleting the job automatically (NOTE: This can cause issues with upgrades) + ## + ttlSecondsAfterFinished: -1 + +############################## +## @section Ingress Parameters +############################## +ingress: + ## @param ingress.enabled Create an ingress resource for external Anchore service APIs + ## ref: https://kubernetes.io/docs/user-guide/ingress/ + ## + enabled: false + + ## @param ingress.labels Labels for the ingress resource + ## + labels: {} + + ## @param ingress.annotations [object] Annotations for the ingress resource + ## By default this chart is setup to use an NGINX ingress controller, which needs to be installed & configured on your cluster + ## ref: https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.2/guide/ingress/annotations/ + ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md + ## ref: https://cloud.google.com/kubernetes-engine/docs/how-to/ingress-features + ## + annotations: {} + + ## @param ingress.apiHosts List of custom hostnames for the Anchore API + ## + apiHosts: 
[] + + ## @param ingress.apiPaths The path used for accessing the Anchore API + ## + apiPaths: + - /v2/ + - /version/ + + ## @param ingress.uiHosts List of custom hostnames for the Anchore UI + ## + uiHosts: [] + + ## @param ingress.uiPath The path used for accessing the Anchore UI + ## + uiPath: / + + ## @param ingress.feedsHosts List of custom hostnames for the Anchore Feeds API + ## + feedsHosts: [] + + ## @param ingress.feedsPaths The path used for accessing the Anchore Feeds API + ## Exposing the feeds API is for special cases only, use /v2/feeds for ingress.feedsPath + ## + feedsPaths: + - /v2/feeds/ + + ## @param ingress.tls Configure tls for the ingress resource + ## Secrets must be manually created in the release namespace + ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls + ## + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + + ## @param ingress.ingressClassName sets the ingress class name. As of k8s v1.18, this should be nginx + ## ref: # Reference: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/ + ## + ingressClassName: nginx + +######################################### +## @section Google CloudSQL DB Parameters +######################################### +cloudsql: + ## @param cloudsql.enabled Use CloudSQL proxy container for GCP database access + ## + enabled: false + + ## @param cloudsql.image Image to use for GCE CloudSQL Proxy + ## + image: gcr.io/cloudsql-docker/gce-proxy:1.25.0 + + ## @param cloudsql.imagePullPolicy Image Pull Policy to use for CloudSQL image + ## + imagePullPolicy: IfNotPresent + + ## @param cloudsql.instance CloudSQL instance, eg: 'project:zone:instancename' + ## + instance: "" + + ## @param cloudsql.useExistingServiceAcc Use existing service account + ## If using an existing Service Account, you must create a secret which includes the JSON token from Google's IAM + ## ref: 
https://cloud.google.com/sql/docs/postgres/authentication + ## + useExistingServiceAcc: false + + ## @param cloudsql.serviceAccSecretName + ## + serviceAccSecretName: "" + + ## @param cloudsql.serviceAccJsonName + ## + serviceAccJsonName: "" + + ## @param cloudsql.extraArgs a list of extra arguments to be passed into the cloudsql container command. eg + ## extraArgs: + ## - "-ip_address_types=PRIVATE" + ## - "-enable_iam_login" + ## + extraArgs: [] + +####################################### +## @section Anchore UI Redis Parameters +####################################### + +ui-redis: + ## @param ui-redis.chartEnabled Use the dependent chart for the UI Redis deployment + ## + chartEnabled: true + + ## @param ui-redis.externalEndpoint External Redis endpoint when not using Helm managed chart (eg redis://:@hostname:6379) + ## + externalEndpoint: "" + + ## @param ui-redis.auth.password Password used for connecting to Redis + ## + auth: + password: anchore-redis,123 + + ## @param ui-redis.architecture Redis deployment architecture + ## + architecture: standalone + + ## @param ui-redis.master.persistence.enabled enables persistence + master: + persistence: + enabled: false + +####################################### +## @section Anchore Database Parameters +####################################### +postgresql: + ## @param postgresql.chartEnabled Use the dependent chart for Postgresql deployment + ## + chartEnabled: true + + ## @param postgresql.externalEndpoint External Postgresql hostname when not using Helm managed chart (eg. 
mypostgres.myserver.io) + ## externalEndpoint, auth.username, auth.password, auth.database, & primary.service.ports.postgresql are required values for external Postgres + ## + externalEndpoint: "" + + ## @param postgresql.auth.username Username used to connect to postgresql + ## @param postgresql.auth.password Password used to connect to postgresql + ## @param postgresql.auth.database Database name used when connecting to postgresql + ## + auth: + username: anchore + password: anchore-postgres,123 + database: anchore + + primary: + ## @param postgresql.primary.service.ports.postgresql Port used to connect to Postgresql + ## + service: + ports: + postgresql: 5432 + + ## @param postgresql.primary.persistence.size Configure size of the persistent volume used with helm managed chart + ## + persistence: + size: 20Gi + + ## @param postgresql.primary.extraEnvVars An array to add extra environment variables + ## + extraEnvVars: [] + + ## @param postgresql.image.tag Specifies the image to use for this chart. + ## + image: + tag: 13.11.0-debian-11-r15 diff --git a/stable/feeds/.helmignore b/stable/feeds/.helmignore new file mode 100644 index 00000000..0e8a0eb3 --- /dev/null +++ b/stable/feeds/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/stable/feeds/Chart.lock b/stable/feeds/Chart.lock
new file mode 100644
index 00000000..caa7f617
--- /dev/null
+++ b/stable/feeds/Chart.lock
@@ -0,0 +1,9 @@
+dependencies:
+- name: postgresql
+  repository: oci://registry-1.docker.io/bitnamicharts
+  version: 12.5.9
+- name: postgresql
+  repository: oci://registry-1.docker.io/bitnamicharts
+  version: 12.5.9
+digest: sha256:6670b5909223600bcf27ee6d17622a027aabcff38256ac2352ca58ab0e059fc8
+generated: "2023-07-12T14:26:05.057884-07:00"
diff --git a/stable/feeds/Chart.yaml b/stable/feeds/Chart.yaml
new file mode 100644
index 00000000..2d50e449
--- /dev/null
+++ b/stable/feeds/Chart.yaml
@@ -0,0 +1,29 @@
+apiVersion: v2
+name: feeds
+type: application
+version: "2.4.1"
+appVersion: "5.4.0"
+kubeVersion: 1.23.x - 1.29.x || 1.23.x-x - 1.29.x-x
+description: Anchore feeds service
+keywords:
+  - "anchore"
+  - "anchore-feeds"
+maintainers:
+  - name: zhill
+    email: zach@anchore.com
+  - name: btodhunter
+    email: bradyt@anchore.com
+  - name: hnguyen
+    email: hung.nguyen@anchore.com
+icon: https://anchore.com/wp-content/uploads/2016/08/anchore.png
+dependencies:
+  - name: postgresql
+    version: "12.5"
+    repository: "oci://registry-1.docker.io/bitnamicharts"
+    condition: feeds-db.chartEnabled
+    alias: feeds-db
+  - name: postgresql
+    version: "~12.5"
+    repository: "oci://registry-1.docker.io/bitnamicharts"
+    condition: gem-db.chartEnabled,anchoreConfig.feeds.drivers.gem.enabled
+    alias: gem-db
diff --git a/stable/feeds/README.md b/stable/feeds/README.md
new file mode 100644
index 00000000..613bfccc
--- /dev/null
+++ b/stable/feeds/README.md
@@ -0,0 +1,528 @@
+# Anchore Enterprise Feeds Helm Chart
+
+> :exclamation: **Important:** View the **[Chart Release Notes](#release-notes)** for the latest changes prior to installation
or upgrading.
+
+This Helm chart deploys the Anchore Enterprise Feeds service on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
+
+Anchore Enterprise Feeds is an On-Premises service that supplies operating system and non-operating system vulnerability data and package data for consumption by Anchore Policy Engine. Policy Engine uses this data for finding vulnerabilities and evaluating policies.
+
+See the [Anchore Feeds Documentation](https://docs.anchore.com/current/docs/overview/feeds/) for more details.
+
+## Table of Contents
+
+- [Prerequisites](#prerequisites)
+- [Installing the Chart](#installing-the-chart)
+- [Installing on Openshift](#installing-on-openshift)
+- [Uninstalling the Chart](#uninstalling-the-chart)
+- [Configuration](#configuration)
+  - [Feeds External Database Configuration](#feeds-external-database-configuration)
+  - [Feeds Driver Configuration](#feeds-driver-configuration)
+  - [Existing Secrets](#existing-secrets)
+  - [Ingress](#ingress)
+  - [Prometheus Metrics](#prometheus-metrics)
+- [Parameters](#parameters)
+- [Release Notes](#release-notes)
+
+## Prerequisites
+
+- [Helm](https://helm.sh/) >=3.8
+- [Kubernetes](https://kubernetes.io/) >=1.23
+
+## Installing the Chart
+
+This guide covers deploying Anchore Enterprise on a Kubernetes cluster with the default configuration. For production deployments, refer to the [Configuration](#configuration) section for additional guidance.
+
+1. **Create a Kubernetes Secret for License File**: Generate a Kubernetes secret to store your Anchore Enterprise license file.
+
+    ```shell
+    export NAMESPACE=anchore
+    export LICENSE_PATH="license.yaml"
+
+    kubectl create secret generic anchore-enterprise-license --from-file=license.yaml=${LICENSE_PATH} -n ${NAMESPACE}
+    ```
+
+1.
**Create a Kubernetes Secret for DockerHub Credentials**: Generate another Kubernetes secret for DockerHub credentials. These credentials should have access to private Anchore Enterprise repositories. We recommend that you create a brand new DockerHub user for these pull credentials. Contact [Anchore Support](https://get.anchore.com/contact/) to obtain access. + + ```shell + export NAMESPACE=anchore + export DOCKERHUB_PASSWORD="password" + export DOCKERHUB_USER="username" + export DOCKERHUB_EMAIL="example@email.com" + + kubectl create secret docker-registry anchore-enterprise-pullcreds --docker-server=docker.io --docker-username=${DOCKERHUB_USER} --docker-password=${DOCKERHUB_PASSWORD} --docker-email=${DOCKERHUB_EMAIL} -n ${NAMESPACE} + ``` + +1. **Add Chart Repository & Deploy Anchore Enterprise**: Create a custom values file, named `anchore_values.yaml`, to override any chart parameters. Refer to the [Parameters](#parameters) section for available options. + + > :exclamation: **Important**: Default passwords are specified in the chart. It's highly recommended to modify these before deploying. + + ```shell + export NAMESPACE=anchore + export RELEASE=my-release + + helm repo add anchore https://charts.anchore.io + helm install ${RELEASE} -n ${NAMESPACE} anchore/feeds -f anchore_values.yaml + ``` + + > **Note**: This command installs Anchore Enterprise with a chart-managed PostgreSQL database, which may not be suitable for production use. See the [External Database](#external-database-requirements) section for details on using an external database. + +1. **Post-Installation Steps**: Anchore Enterprise will take some time to initialize. After the bootstrap phase, it will begin a vulnerability feed sync. Image analysis will show zero vulnerabilities until this sync is complete. This can take several hours based on the enabled feeds. 
+ + > **Tip**: List all releases using `helm list` + +### Installing on Openshift + +By default, we assign the `securityContext.fsGroup`, `securityContext.runAsGroup`, and `securityContext.runAsUser` to `1000`. This will most likely fail on openshift for not being in the range determined by the `openshift.io/sa.scc.uid-range` annotation openshift attaches to the namespace when created. If using the chartEnabled postgresql, postgres will fail to come up as well due to this reason. + +1. Either disable the securityContext or set the appropriate values. +1. If using the chartEnabled postgres, you will also need to either disable the feeds-db.primary.podSecurityContext and feeds-db.primary.containerSecurityContext, or set the appropriate values for them + +Note: disabling the containerSecurityContext and podSecurityContext may not be suitable for production. See [Redhat's documentation](https://docs.openshift.com/container-platform/4.13/authentication/managing-security-context-constraints.html#managing-pod-security-policies) on what may be suitable for production. + +For more information on the openshift.io/sa.scc.uid-range annotation, see the [openshift docs](https://docs.openshift.com/dedicated/authentication/managing-security-context-constraints.html#security-context-constraints-pre-allocated-values_configuring-internal-oauth) + +```shell +helm install feedsy anchore/feeds \ + --set securityContext.fsGroup=null \ + --set securityContext.runAsGroup=null \ + --set securityContext.runAsUser=null \ + --set feeds-db.primary.containerSecurityContext.enabled=false \ + --set feeds-db.primary.podSecurityContext.enabled=false +``` + +#### Example OpenShift values file + +```yaml +# NOTE: This is not a production ready values file for an openshift deployment. 
+securityContext: + fsGroup: null + runAsGroup: null + runAsUser: null + +feeds-db: + primary: + containerSecurityContext: + enabled: false + podSecurityContext: + enabled: false +``` + +## Upgrading the Chart + +A Helm pre-upgrade hook initiates a Kubernetes job that scales down all active Anchore Feeds pods and handles the Anchore database upgrade. + +The Helm upgrade is marked as successful only upon the job's completion. This process causes the Helm client to pause until the job finishes and new Anchore Enterprise pods are initiated. To monitor the upgrade, follow the logs of the upgrade job, which is automatically removed after a successful Helm upgrade. + + ```shell + export NAMESPACE=anchore + export RELEASE=my-release + + helm upgrade ${RELEASE} -n ${NAMESPACE} anchore/feeds -f anchore_values.yaml + ``` + +An optional post-upgrade hook is available to perform Anchore Feeds upgrades without forcing all pods to terminate prior to running the upgrade. This is the same upgrade behavior that was enabled by default in the legacy anchore-engine chart. To enable the post-upgrade hook, set `feedsUpgradeJob.usePostUpgradeHook=true` in your values file. + +## Uninstalling the Chart + +To completely remove the Anchore Feeds deployment and associated Kubernetes resources, follow the steps below: + + ```shell + export NAMESPACE=anchore + export RELEASE=my-release + + helm delete ${RELEASE} -n ${NAMESPACE} + ``` + +After deleting the helm release, there are still a few persistent volume claims to delete. Delete these only if you're certain you no longer need them. + + ```shell + export NAMESPACE=anchore + export RELEASE=my-release + + kubectl get pvc -n ${NAMESPACE} + kubectl delete pvc ${RELEASE}-feeds -n ${NAMESPACE} + kubectl delete pvc ${RELEASE}-feeds-db -n ${NAMESPACE} + ``` + +## Configuration + +This section outlines the available configuration options for Anchore Enterprise. 
The default settings are specified in the bundled [values file](https://github.com/anchore/anchore-charts/blob/main/stable/feeds/values.yaml). To customize these settings, create your own `anchore_values.yaml` file and populate it with the configuration options you wish to override. To apply your custom configuration during installation, pass your custom values file to the `helm install` command:
+
+```shell
+export NAMESPACE=anchore
+export RELEASE="my-release"
+
+helm install ${RELEASE} -n ${NAMESPACE} anchore/feeds -f custom_values.yaml
+```
+
+For additional guidance on customizing your Anchore Enterprise deployment, reach out to [Anchore Support](https://get.anchore.com/contact/).
+
+### Feeds External Database Configuration
+
+Anchore Enterprise Feeds requires access to a Postgres-compatible database, version 12 or higher to operate. Note that this is a separate database from the primary Anchore Enterprise database. For Enterprise Feeds, an external database such as AWS RDS or Google CloudSQL is recommended for production deployments. The Helm chart provides a chart-managed database by default unless otherwise configured.
+
+See previous examples of configuring RDS Postgres and Google CloudSQL.
+
+```yaml
+anchoreConfig:
+  database:
+    ssl: true
+    sslMode: require
+
+feeds-db:
+  # chartEnabled: false disables the chart-managed Postgres instance
+  chartEnabled: false
+
+  # auth.username, auth.password & auth.database are required values for external Postgres
+  auth:
+    password:
+    username:
+    database:
+
+  # Required for external Postgres.
+  # Specify an external (already existing) Postgres deployment for use.
+  # Set to the host eg. mypostgres.myserver.io
+  externalEndpoint:
+```
+
+### Feeds Driver Configuration
+
+This service is comprised of different drivers for different vulnerability feeds. The drivers can be configured separately, and some drivers require a token or other credential.
+
+See the [Anchore Enterprise Feeds](https://docs.anchore.com/current/docs/configuration/feeds/) documentation for details.
+
+```yaml
+anchoreConfig:
+  feeds:
+    drivers:
+      github:
+        enabled: true
+        # The GitHub feeds driver requires a GitHub developer personal access token with no permission scopes selected.
+        # See https://docs.github.com/en/github/authenticating-to-github/keeping-your-account-and-data-secure/creating-a-personal-access-token
+        token: your-github-token
+
+      # Enable microsoft feeds
+      msrc:
+        enabled: true
+```
+
+### Existing Secrets
+
+For deployments where version-controlled configurations are essential, it's advised to avoid storing credentials directly in values files. Instead, manually create Kubernetes secrets and reference them as existing secrets within your values files. When using existing secrets, the chart will load environment variables into deployments from the secret names specified by the following values:
+
+- `.Values.existingSecretName` [default: anchore-enterprise-feeds-env]
+
+To enable this feature, set the following values to `true` in your values file:
+
+```yaml
+useExistingSecrets: true
+```
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: anchore-enterprise-feeds-env
+  labels:
+    app: anchore
+type: Opaque
+stringData:
+  ANCHORE_ADMIN_PASSWORD: foobar1234
+  ANCHORE_FEEDS_DB_NAME: anchore-feeds
+  ANCHORE_FEEDS_DB_USER: anchoreengine
+  ANCHORE_FEEDS_DB_PASSWORD: anchore-postgres,123
+  ANCHORE_FEEDS_DB_HOST: anchore-enterprise-feeds-db
+  ANCHORE_FEEDS_DB_PORT: "5432"
+  # (if applicable) ANCHORE_SAML_SECRET: foobar,saml1234
+  # (if applicable) ANCHORE_GITHUB_TOKEN: foobar,github1234
+  # (if applicable) ANCHORE_NVD_API_KEY: foobar,nvd1234
+  # (if applicable) ANCHORE_GEM_DB_NAME: anchore-gems
+  # (if applicable) ANCHORE_GEM_DB_USER: anchoregemsuser
+  # (if applicable) ANCHORE_GEM_DB_PASSWORD: foobar1234
+  # (if applicable) ANCHORE_GEM_DB_HOST: anchorefeeds-gem-db.example.com:5432
+```
+
+### Ingress
+
+[Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) serves as the gateway to expose HTTP and HTTPS routes from outside the Kubernetes cluster to services within it. Routing is governed by rules specified in the Ingress resource. Kubernetes supports a variety of ingress controllers, such as AWS ALB and GCE controllers.
+
+This Helm chart includes a foundational ingress configuration that is customizable. You can expose the Anchore Feeds API by editing the `ingress` section in your values file.
+
+Ingress is disabled by default in this Helm chart. To enable it, along with the [NGINX ingress controller](https://kubernetes.github.io/ingress-nginx/) for the Feeds API route, set the `ingress.enabled` value to `true`.
+
+```yaml
+ingress:
+  enabled: true
+```
+
+#### ALB Ingress Controller
+
+The [Kubernetes ALB ingress controller](https://docs.aws.amazon.com/eks/latest/userguide/alb-ingress.html) must be installed into the cluster for this configuration to work.
+
+```yaml
+ingress:
+  enabled: true
+  annotations:
+    alb.ingress.kubernetes.io/scheme: internet-facing
+  ingressClassName: alb
+
+  hosts:
+    - anchore-feeds.example.com
+
+service:
+  type: NodePort
+```
+
+#### GCE Ingress Controller
+
+The [Kubernetes GCE ingress controller](https://cloud.google.com/kubernetes-engine/docs/concepts/ingress) must be installed into the cluster for this configuration to work.
+
+```yaml
+ingress:
+  enabled: true
+  ingressClassName: gce
+  paths:
+    - /v1/feeds/*
+    - /v2/feeds/*
+
+  hosts:
+    - anchore-feeds.example.com
+
+service:
+  type: NodePort
+```
+
+### Prometheus Metrics
+
+Anchore Enterprise offers native support for exporting Prometheus metrics from each of its containers. When this feature is enabled, each service exposes metrics via its existing service port.
If you're adding Prometheus manually to your deployment, you'll need to configure it to recognize each pod and its corresponding ports. + +```yaml +anchoreConfig: + metrics: + enabled: true + auth_disabled: true +``` + +## Parameters + +### Common Resource Parameters + +| Name | Description | Value | +| --------------------------------------- | ----------------------------------------------------------------------------------------------------- | ------------------------------------- | +| `standalone` | Enable running the Anchore Feeds service in standalone mode | `true` | +| `url` | Set a custom feeds URL. Useful when using a feeds service endpoint that is external from the cluster. | `""` | +| `fullnameOverride` | overrides the fullname set on resources | `""` | +| `nameOverride` | overrides the name set on resources | `""` | +| `image` | Image used for feeds deployment | `docker.io/anchore/enterprise:v5.4.0` | +| `imagePullPolicy` | Image pull policy used by all deployments | `IfNotPresent` | +| `imagePullSecretName` | Name of Docker credentials secret for access to private repos | `anchore-enterprise-pullcreds` | +| `serviceAccountName` | Name of a service account used to run all Feeds pods | `""` | +| `injectSecretsViaEnv` | Enable secret injection into pod via environment variables instead of via k8s secrets | `false` | +| `licenseSecretName` | Name of the Kubernetes secret containing your license.yaml file | `anchore-enterprise-license` | +| `certStoreSecretName` | Name of secret containing the certificates & keys used for SSL, SAML & CAs | `""` | +| `extraEnv` | Common environment variables set on all containers | `[]` | +| `labels` | Common labels set on all Kubernetes resources | `{}` | +| `annotations` | Common annotations set on all Kubernetes resources | `{}` | +| `resources` | Resource requests and limits for Anchore Feeds pods | `{}` | +| `nodeSelector` | Node labels for Anchore Feeds pod assignment | `{}` | +| `tolerations` | Tolerations for Anchore 
Feeds pod assignment | `[]` | +| `affinity` | Affinity for Anchore Feeds pod assignment | `{}` | +| `service.type` | Service type for Anchore Feeds | `ClusterIP` | +| `service.port` | Service port for Anchore Feeds | `8448` | +| `service.annotations` | Annotations for Anchore Feeds service | `{}` | +| `service.labels` | Labels for Anchore Feeds service | `{}` | +| `service.nodePort` | nodePort for Anchore Feeds service | `""` | +| `scratchVolume.mountPath` | The mount path of an external volume for scratch space for image analysis | `/anchore_scratch` | +| `scratchVolume.fixGroupPermissions` | Enable an initContainer that will fix the fsGroup permissions | `false` | +| `scratchVolume.fixerInitContainerImage` | Set the container image for the permissions fixer init container | `alpine` | +| `scratchVolume.details` | Details for the k8s volume to be created | `{}` | +| `persistence.enabled` | Enable mounting an external volume for feeds driver workspace | `true` | +| `persistence.fixGroupPermissions` | Enable an initContainer that will fix the fsGroup permissions | `false` | +| `persistence.resourcePolicy` | Resource policy Helm annotation on PVC. 
Can be nil or "keep" | `keep` | +| `persistence.existingClaim` | Specify an existing volume claim | `""` | +| `persistence.storageClass` | Persistent volume storage class | `""` | +| `persistence.accessMode` | Access Mode for persistent volume | `ReadWriteOnce` | +| `persistence.size` | Size of persistent volume | `40Gi` | +| `persistence.mountPath` | Mount path on Anchore Feeds container for persistent volume | `/workspace` | +| `persistence.subPath` | Directory name used for persistent volume storage | `feeds-workspace` | +| `persistence.annotations` | Annotations for PVC | `{}` | +| `extraVolumes` | mounts additional volumes to each pod | `[]` | +| `extraVolumeMounts` | mounts additional volumes to each pod | `[]` | +| `securityContext.runAsUser` | The securityContext runAsUser for all Feeds pods | `1000` | +| `securityContext.runAsGroup` | The securityContext runAsGroup for all Feeds pods | `1000` | +| `securityContext.fsGroup` | The securityContext fsGroup for all Feeds pods | `1000` | +| `containerSecurityContext` | The securityContext for all Feeds containers | `{}` | +| `probes.liveness.initialDelaySeconds` | Initial delay seconds for liveness probe | `120` | +| `probes.liveness.timeoutSeconds` | Timeout seconds for liveness probe | `10` | +| `probes.liveness.periodSeconds` | Period seconds for liveness probe | `10` | +| `probes.liveness.failureThreshold` | Failure threshold for liveness probe | `6` | +| `probes.liveness.successThreshold` | Success threshold for liveness probe | `1` | +| `probes.readiness.timeoutSeconds` | Timeout seconds for the readiness probe | `10` | +| `probes.readiness.periodSeconds` | Period seconds for the readiness probe | `10` | +| `probes.readiness.failureThreshold` | Failure threshold for the readiness probe | `3` | +| `probes.readiness.successThreshold` | Success threshold for the readiness probe | `1` | +| `doSourceAtEntry.enabled` | Does a `source` of the file paths defined before starting Anchore services | `false` | +| 
`doSourceAtEntry.filePaths` | List of file paths to `source` before starting Anchore services | `[]` | +| `useExistingSecrets` | forgoes secret creation and uses the secret defined in existingSecretName | `false` | +| `existingSecretName` | Name of the existing secret to be used for Anchore Feeds Service | `anchore-enterprise-feeds-env` | +| `configOverride` | Allows for overriding the default Anchore configuration file | `{}` | +| `scripts` | Collection of helper scripts usable in all anchore enterprise pods | `{}` | + + +### Anchore Feeds Configuration Parameters + +| Name | Description | Value | +| -------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | +| `anchoreConfig.service_dir` | Path to directory where default Anchore configs are placed at startup | `/anchore_service` | +| `anchoreConfig.log_level` | The log level for Anchore services | `INFO` | +| `anchoreConfig.keys.secret` | The shared secret used for signing & encryption, auto-generated by Helm if not set | `""` | +| `anchoreConfig.keys.privateKeyFileName` | The file name of the private key used for signing & encryption, found in the k8s secret specified in .Values.certStoreSecretName | `""` | +| `anchoreConfig.keys.publicKeyFileName` | The file name of the public key used for signing & encryption, found in the k8s secret specified in .Values.certStoreSecretName | `""` | +| `anchoreConfig.user_authentication.oauth.enabled` | Enable OAuth for Anchore user authentication | `false` | +| `anchoreConfig.user_authentication.oauth.default_token_expiration_seconds` | The expiration, in seconds, for OAuth tokens | `3600` | +| `anchoreConfig.user_authentication.oauth.refresh_token_expiration_seconds` | The expiration, in 
seconds, for OAuth refresh tokens | `86400` | +| `anchoreConfig.user_authentication.hashed_passwords` | Enable storing passwords as secure hashes in the database | `false` | +| `anchoreConfig.user_authentication.sso_require_existing_users` | set to true in order to disable the SSO JIT provisioning during authentication | `false` | +| `anchoreConfig.metrics.enabled` | Enable Prometheus metrics for all Anchore services | `false` | +| `anchoreConfig.metrics.auth_disabled` | Disable auth on Prometheus metrics for all Anchore services | `false` | +| `anchoreConfig.database.timeout` | | `120` | +| `anchoreConfig.database.ssl` | Enable SSL/TLS for the database connection | `false` | +| `anchoreConfig.database.sslMode` | The SSL mode to use for database connection | `require` | +| `anchoreConfig.database.sslRootCertFileName` | File name of the database root CA certificate stored in the k8s secret specified with .Values.certStoreSecretName | `""` | +| `anchoreConfig.database.db_pool_size` | The database max connection pool size | `30` | +| `anchoreConfig.database.db_pool_max_overflow` | The maximum overflow size of the database connection pool | `100` | +| `anchoreConfig.database.engineArgs` | Set custom database engine arguments for SQLAlchemy | `{}` | +| `anchoreConfig.internalServicesSSL.enabled` | Force all Enterprise services to use SSL for internal communication | `false` | +| `anchoreConfig.internalServicesSSL.verifyCerts` | Enable cert verification against the local cert bundle, if this set to false self-signed certs are allowed | `false` | +| `anchoreConfig.internalServicesSSL.certSecretKeyFileName` | File name of the private key used for internal SSL stored in the secret specified in .Values.certStoreSecretName | `""` | +| `anchoreConfig.internalServicesSSL.certSecretCertFileName` | File name of the root CA certificate used for internal SSL stored in the secret specified in .Values.certStoreSecretName | `""` | +| `anchoreConfig.feeds.cycle_timers.driver_sync` | 
Time delay in seconds between consecutive driver runs for processing data | `7200` | +| `anchoreConfig.feeds.drivers.debian.releases` | Additional Debian feeds groups | `{}` | +| `anchoreConfig.feeds.drivers.ubuntu.releases` | Additional Ubuntu feed groups | `{}` | +| `anchoreConfig.feeds.drivers.npm.enabled` | Enable vulnerability drivers for npm data | `false` | +| `anchoreConfig.feeds.drivers.gem.enabled` | Enable vulnerability drivers for gem data | `false` | +| `anchoreConfig.feeds.drivers.gem.db_connect` | Defines the database endpoint used for loading the rubygems package data as a PostgreSQL dump | `postgresql://${ANCHORE_GEM_DB_USER}:${ANCHORE_GEM_DB_PASSWORD}@${ANCHORE_GEM_DB_HOST}:${ANCHORE_GEM_DB_PORT}/${ANCHORE_GEM_DB_NAME}` | +| `anchoreConfig.feeds.drivers.nvdv2.api_key` | The NVD API key value | `""` | +| `anchoreConfig.feeds.drivers.msrc.enabled` | Enable Microsoft feeds | `false` | +| `anchoreConfig.feeds.drivers.msrc.whitelist` | MSRC product IDs for generating feed data, this extends the pre-defined list of product IDs | `[]` | +| `anchoreConfig.feeds.drivers.github.enabled` | Enable GitHub advisory feeds (requires GitHub PAT) | `false` | +| `anchoreConfig.feeds.drivers.github.token` | GitHub developer personal access token with zero permission scopes | `""` | + + +### Anchore Feeds Database Parameters + +| Name | Description | Value | +| ------------------------------------------- | ------------------------------------------------------------------------------------------------- | ----------------------- | +| `feeds-db.chartEnabled` | Use the dependent chart for Feeds Postgresql deployment | `true` | +| `feeds-db.externalEndpoint` | External Feeds Postgresql hostname when not using Helm managed chart (eg. 
mypostgres.myserver.io) | `""` | +| `feeds-db.auth.username` | Username used to connect to Postgresql | `anchore-feeds` | +| `feeds-db.auth.password` | Password used to connect to Postgresql | `anchore-postgres,123` | +| `feeds-db.auth.database` | Database name used when connecting to Postgresql | `anchore-feeds` | +| `feeds-db.primary.service.ports.postgresql` | Port used to connect to Postgresql | `5432` | +| `feeds-db.primary.persistence.size` | Configure size of the persistent volume used with helm managed chart | `20Gi` | +| `feeds-db.primary.extraEnvVars` | An array to add extra environment variables | `[]` | +| `feeds-db.image.tag` | Specifies the image to use for this chart. | `13.11.0-debian-11-r15` | + + +### Feeds Gem Database Parameters + +| Name | Description | Value | +| ----------------------------------------- | ------------------------------------------------------------------------------------------- | ----------------------- | +| `gem-db.chartEnabled` | Use the dependent chart for Postgresql deployment | | +| `gem-db.externalEndpoint` | External Postgresql hostname when not using Helm managed chart (eg. mypostgres.myserver.io) | `""` | +| `gem-db.auth.username` | Username used to connect to Postgresql | `anchore-gem-feeds` | +| `gem-db.auth.password` | Password used to connect to Postgresql | `anchore-postgres,123` | +| `gem-db.auth.database` | Database name used when connecting to Postgresql | `anchore-gem-feeds` | +| `gem-db.primary.service.ports.postgresql` | Port used to connect to Postgresql | `5432` | +| `gem-db.primary.persistence.size` | Configure size of the persistent volume used with helm managed chart | `20Gi` | +| `gem-db.primary.extraEnvVars` | An array to add extra environment variables | `[]` | +| `gem-db.image.tag` | Specifies the image to use for this chart. 
| `13.11.0-debian-11-r15` | + + +### Anchore Feeds Upgrade Job Parameters + +| Name | Description | Value | +| ----------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------- | +| `feedsUpgradeJob.enabled` | Enable the Anchore Feeds database upgrade job | `true` | +| `feedsUpgradeJob.force` | Force the Anchore Feeds database upgrade job to run as a regular job instead of as a Helm hook | `false` | +| `feedsUpgradeJob.rbacCreate` | Create RBAC resources for the upgrade job | `true` | +| `feedsUpgradeJob.serviceAccountName` | Use an existing service account for the upgrade job | `""` | +| `feedsUpgradeJob.usePostUpgradeHook` | Use a Helm post-upgrade hook to run the upgrade job instead of the default pre-upgrade hook. This job does not require creating RBAC resources. | `false` | +| `feedsUpgradeJob.kubectlImage` | The image to use for the upgrade job's init container that uses kubectl to scale down deployments before an upgrade | `bitnami/kubectl:1.27` | +| `feedsUpgradeJob.nodeSelector` | Node labels for the Anchore Feeds upgrade job pod assignment | `{}` | +| `feedsUpgradeJob.tolerations` | Tolerations for the Anchore Feeds upgrade job pod assignment | `[]` | +| `feedsUpgradeJob.affinity` | Affinity for the Anchore Feeds upgrade job pod assignment | `{}` | +| `feedsUpgradeJob.annotations` | Annotations for the Anchore Feeds upgrade job | `{}` | +| `feedsUpgradeJob.labels` | Labels for the Anchore Feeds upgrade job | `{}` | +| `feedsUpgradeJob.resources` | Resources for the Anchore Feeds upgrade job | `{}` | +| `feedsUpgradeJob.ttlSecondsAfterFinished` | The time period in seconds the upgrade job, and it's related pods should be retained for | `-1` | + + +### Ingress Parameters + +| Name | Description | Value | +| -------------------------- | ------------------------------------------------------------------ | 
---------------- | +| `ingress.enabled` | Create an ingress resource for external Anchore service APIs | `false` | +| `ingress.labels` | Labels for the ingress resource | `{}` | +| `ingress.annotations` | Annotations for the ingress resource | `{}` | +| `ingress.hosts` | List of custom hostnames for the Anchore Feeds API | `[]` | +| `ingress.paths` | The path used for accessing the Anchore Feeds API | `["/v2/feeds/"]` | +| `ingress.tls` | Configure tls for the ingress resource | `[]` | +| `ingress.ingressClassName` | sets the ingress class name. As of k8s v1.18, this should be nginx | `nginx` | + + +### Google CloudSQL DB Parameters + +| Name | Description | Value | +| -------------------------------- | ------------------------------------------------------------------------------ | ----------------------------------------- | +| `cloudsql.enabled` | Use CloudSQL proxy container for GCP database access | `false` | +| `cloudsql.image` | Image to use for GCE CloudSQL Proxy | `gcr.io/cloudsql-docker/gce-proxy:1.25.0` | +| `cloudsql.imagePullPolicy` | Image Pull Policy to use for CloudSQL image | `IfNotPresent` | +| `cloudsql.instance` | CloudSQL instance, eg: 'project:zone:instancename' | `""` | +| `cloudsql.useExistingServiceAcc` | Use existing service account | `false` | +| `cloudsql.serviceAccSecretName` | | `""` | +| `cloudsql.serviceAccJsonName` | | `""` | +| `cloudsql.extraArgs` | a list of extra arguments to be passed into the cloudsql container command. eg | `[]` | + + +## Release Notes + +For the latest updates and features in Anchore Enterprise, see the official [Release Notes](https://docs.anchore.com/current/docs/releasenotes/). + +- **Major Chart Version Change (e.g., v0.1.2 -> v1.0.0)**: Signifies an incompatible breaking change that necessitates manual intervention, such as updates to your values file or data migrations. 
+- **Minor Chart Version Change (e.g., v0.1.2 -> v0.2.0)**: Indicates a significant change to the deployment that does not require manual intervention. +- **Patch Chart Version Change (e.g., v0.1.2 -> v0.1.3)**: Indicates a backwards-compatible bug fix or documentation update. + +### v2.4.0 + +- Update Anchore Feeds image to v5.4.0. See the [Release Notes](https://docs.anchore.com/current/docs/releasenotes/540/) for more information. + +### v2.3.0 + +- Update Anchore Feeds image to v5.3.0. See the [Release Notes](https://docs.anchore.com/current/docs/releasenotes/530/) for more information. +- Bump kubeVersion requirement to allow deployment on Kubernetes v1.29.x clusters. + +### v2.2.0 + +- Update Anchore Feeds image to v5.2.0. See the [Release Notes](https://docs.anchore.com/current/docs/releasenotes/520/) for more information. +- Removes the `null` value from the default `ANCHORE_GITHUB_TOKEN` environment variable in the `anchore-enterprise-feeds-env` secret. This was causing issues with all feeds drivers if a token was not provided. + +### v2.1.0 + +- Update Anchore Feeds image to v5.1.0. See the [Release Notes](https://docs.anchore.com/current/docs/releasenotes/510/) for more information. + +### v2.0.0 + +- Updated Anchore Feeds image to v5.0.0 +- Anchore Feeds v5.0.0 introduces a breaking change to the API endpoints, and requires updating any external integrations to use the new endpoints. See the [Migration Guide](https://docs.anchore.com/current/docs/migration_guide/) for more information. +- The following values were removed as only the `v2` API is supported in Anchore Feeds 5.0.0: + - `feeds.service.apiVersion` + +### v1.0.0 + +- This is a stable release of the Anchore Feeds Helm chart and is recommended for production deployments. +- Deploys Anchore Feeds v4.9.3. + +### v0.x.x + +- This is a pre-release version of the Anchore Enterprise Helm chart and is not recommended for production deployments. 
diff --git a/stable/feeds/files/default_config.yaml b/stable/feeds/files/default_config.yaml new file mode 100644 index 00000000..956135e0 --- /dev/null +++ b/stable/feeds/files/default_config.yaml @@ -0,0 +1,133 @@ +service_dir: ${ANCHORE_SERVICE_DIR} +tmp_dir: ${ANCHORE_FEEDS_TMP_DIR} +log_level: ${ANCHORE_LOG_LEVEL} + +host_id: "${ANCHORE_HOST_ID}" +internal_ssl_verify: ${ANCHORE_INTERNAL_SSL_VERIFY} + +global_client_connect_timeout: ${ANCHORE_GLOBAL_CLIENT_CONNECT_TIMEOUT} +global_client_read_timeout: ${ANCHORE_GLOBAL_CLIENT_READ_TIMEOUT} +server_request_timeout_seconds: ${ANCHORE_GLOBAL_SERVER_REQUEST_TIMEOUT_SEC} + +license_file: ${ANCHORE_LICENSE_FILE} +auto_restart_services: false + +max_source_import_size_mb: ${ANCHORE_MAX_IMPORT_SOURCE_SIZE_MB} +max_import_content_size_mb: ${ANCHORE_MAX_IMPORT_CONTENT_SIZE_MB} +max_compressed_image_size_mb: ${ANCHORE_MAX_COMPRESSED_IMAGE_SIZE_MB} + +metrics: + enabled: ${ANCHORE_ENABLE_METRICS} + auth_disabled: true + +keys: + secret: ${ANCHORE_SAML_SECRET} + public_key_path: ${ANCHORE_AUTH_PRIVKEY} + private_key_path: ${ANCHORE_AUTH_PUBKEY} + +user_authentication: + hashed_passwords: ${ANCHORE_AUTH_ENABLE_HASHED_PASSWORDS} + sso_require_existing_users: ${ANCHORE_SSO_REQUIRES_EXISTING_USERS} + oauth: + enabled: ${ANCHORE_OAUTH_ENABLED} + default_token_expiration_seconds: ${ANCHORE_OAUTH_TOKEN_EXPIRATION} + refresh_token_expiration_seconds: ${ANCHORE_OAUTH_REFRESH_TOKEN_EXPIRATION} + +credentials: + database: + user: "${ANCHORE_FEEDS_DB_USER}" + password: "${ANCHORE_FEEDS_DB_PASSWORD}" + host: "${ANCHORE_FEEDS_DB_HOST}" + port: "${ANCHORE_FEEDS_DB_PORT}" + name: "${ANCHORE_FEEDS_DB_NAME}" + db_connect_args: + timeout: ${ANCHORE_FEEDS_DB_TIMEOUT} + ssl: ${ANCHORE_FEEDS_DB_SSL} + {{- if .Values.anchoreConfig.database.ssl }} + sslmode: ${ANCHORE_FEEDS_DB_SSL_MODE} + sslrootcert: ${ANCHORE_FEEDS_DB_SSL_ROOT_CERT} + {{- end }} + db_pool_size: ${ANCHORE_FEEDS_DB_POOL_SIZE} + db_pool_max_overflow: 
${ANCHORE_FEEDS_DB_POOL_MAX_OVERFLOW} + {{- with .Values.anchoreConfig.database.engineArgs }} + db_engine_args: {{- toYaml . | nindent 6 }} + {{- end }} + +services: + feeds: + enabled: true + require_auth: true + endpoint_hostname: ${ANCHORE_ENDPOINT_HOSTNAME} + listen: '0.0.0.0' + port: ${ANCHORE_PORT} + max_request_threads: ${ANCHORE_MAX_REQUEST_THREADS} + ssl_enable: ${ANCHORE_SSL_ENABLED} + ssl_cert: ${ANCHORE_SSL_CERT} + ssl_key: ${ANCHORE_SSL_KEY} + cycle_timers: {{- toYaml .Values.anchoreConfig.feeds.cycle_timers | nindent 6 }} + local_workspace: ${ANCHORE_FEEDS_LOCAL_WORKSPACE} + workspace_preload: + enabled: ${ANCHORE_FEEDS_WORKSPACE_PRELOAD} + workspace_preload_file: ${ANCHORE_FEEDS_LOCAL_WORKSPACE}/data.tar.gz + api_only: ${ANCHORE_FEEDS_API_ONLY} + drivers: + amzn: + enabled: ${ANCHORE_FEEDS_DRIVER_AMAZON_ENABLED} + alpine: + enabled: ${ANCHORE_FEEDS_DRIVER_ALPINE_ENABLED} + centos: + enabled: false + debian: + enabled: ${ANCHORE_FEEDS_DRIVER_DEBIAN_ENABLED} + {{- with .Values.anchoreConfig.feeds.drivers.debian.releases }} + releases: {{- toYaml . | nindent 10 }} + {{- end }} + ol: + enabled: ${ANCHORE_FEEDS_DRIVER_OL_ENABLED} + ubuntu: + enabled: ${ANCHORE_FEEDS_DRIVER_UBUNTU_ENABLED} + git_url: ${ANCHORE_FEEDS_DRIVER_UBUNTU_URL} + git_branch: ${ANCHORE_FEEDS_DRIVER_UBUNTU_BRANCH} + {{- with .Values.anchoreConfig.feeds.drivers.ubuntu.releases }} + releases: {{- toYaml . 
| nindent 10 }} + {{- end }} + rhel: + enabled: ${ANCHORE_FEEDS_DRIVER_RHEL_ENABLED} + concurrency: ${ANCHORE_FEEDS_DRIVER_RHEL_CONCURRENCY} + npm: + enabled: ${ANCHORE_FEEDS_DRIVER_NPM_ENABLED} + gem: + enabled: ${ANCHORE_FEEDS_DRIVER_GEM_ENABLED} + {{- if .Values.anchoreConfig.feeds.drivers.gem.enabled }} + db_connect: {{ .Values.anchoreConfig.feeds.drivers.gem.db_connect }} + {{- end }} + nvdv2: + enabled: ${ANCHORE_FEEDS_DRIVER_NVDV2_ENABLED} + api_key: ${ANCHORE_NVD_API_KEY} + mariner: + enabled: ${ANCHORE_FEEDS_DRIVER_MARINER_ENABLED} + msrc: + enabled: ${ANCHORE_FEEDS_DRIVER_MSRC_ENABLED} + {{- with .Values.anchoreConfig.feeds.drivers.msrc.whitelist }} + whitelist: + - {{ . }} + {{- end }} + github: + enabled: ${ANCHORE_FEEDS_DRIVER_GITHUB_ENABLED} + token: ${ANCHORE_GITHUB_TOKEN} + grypedb: + enabled: ${ANCHORE_FEEDS_DRIVER_GRYPEDB_ENABLED} + external_feeds_url: ${ANCHORE_FEEDS_EXTERNAL_URL} + preload: + enabled: ${ANCHORE_FEEDS_GRYPEDB_PRELOAD_ENABLED} + workspace_archive_path: ${ANCHORE_FEEDS_GRYPEDB_PRELOAD_PATH} + persist_provider_workspaces: ${ANCHORE_FEEDS_GRYPEDB_PERSIST_WORKSPACE} + restore_provider_workspaces: ${ANCHORE_FEEDS_GRYPEDB_RESTORE_WORKSPACE} + sles: + enabled: ${ANCHORE_FEEDS_DRIVER_SLES_ENABLED} + anchore_match_exclusions: + enabled: ${ANCHORE_FEEDS_DRIVER_MATCH_EXCLUSIONS} + wolfi: + enabled: ${ANCHORE_FEEDS_DRIVER_WOLFI_ENABLED} + chainguard: + enabled: ${ANCHORE_FEEDS_DRIVER_CHAINGUARD_ENABLED} diff --git a/stable/feeds/templates/NOTES.txt b/stable/feeds/templates/NOTES.txt new file mode 100644 index 00000000..36ca2090 --- /dev/null +++ b/stable/feeds/templates/NOTES.txt @@ -0,0 +1,8 @@ +The Feeds API can be accessed via port {{ .Values.service.port }} on the following DNS name from within the cluster: + + {{ include "feeds.fullname" . -}}.{{- .Release.Namespace -}}.svc.cluster.local + +Initial setup time can be >120sec for postgresql setup and readiness checks to pass for the services as indicated by pod state. 
+You can check with: + + kubectl get pods -l app.kubernetes.io/name={{- template "feeds.fullname" . -}},app.kubernetes.io/component=feeds diff --git a/stable/feeds/templates/_common.tpl b/stable/feeds/templates/_common.tpl new file mode 100644 index 00000000..f4f2bf3f --- /dev/null +++ b/stable/feeds/templates/_common.tpl @@ -0,0 +1,81 @@ +{{/* +Common annotations +*/}} +{{- define "feeds.common.annotations" -}} +{{- if and (not .nil) (not .Values.annotations) }} + {{- print "{}" }} +{{- else }} + {{- with .Values.annotations }} + {{- toYaml . }} + {{- end }} +{{- end }} +{{- end -}} + +{{/* +Setup a container for the cloudsql proxy to run in all pods when .Values.cloudsql.enabled = true +*/}} +{{- define "feeds.common.cloudsqlContainer" -}} +- name: cloudsql-proxy + image: {{ .Values.cloudsql.image }} + imagePullPolicy: {{ .Values.cloudsql.imagePullPolicy }} +{{- with .Values.containerSecurityContext }} + securityContext: + {{ toYaml . | nindent 4 }} +{{- end }} + command: ["/cloud_sql_proxy"] + args: + - "-instances={{ .Values.cloudsql.instance }}=tcp:5432" + {{- if .Values.cloudsql.extraArgs }} + {{- range $arg := .Values.cloudsql.extraArgs }} + - {{ quote $arg }} + {{- end }} + {{- end }} + {{- if .Values.cloudsql.useExistingServiceAcc }} + - "-credential_file=/var/{{ .Values.cloudsql.serviceAccSecretName }}/{{ .Values.cloudsql.serviceAccJsonName }}" + volumeMounts: + - mountPath: /var/{{ .Values.cloudsql.serviceAccSecretName }} + name: {{ .Values.cloudsql.serviceAccSecretName }} + readOnly: true +{{- end }} +{{- end -}} + +{{/* +Common environment variables +*/}} +{{- define "feeds.common.environment" -}} +{{- with .Values.extraEnv }} + {{- toYaml . }} +{{- end }} +- name: ANCHORE_HOST_ID + valueFrom: + fieldRef: + fieldPath: metadata.name +- name: ANCHORE_ENDPOINT_HOSTNAME + value: {{ template "feeds.fullname" . 
}} +- name: ANCHORE_PORT + value: {{ .Values.service.port | quote }} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "feeds.common.labels" -}} +app.kubernetes.io/name: {{ template "feeds.fullname" . }} +app.kubernetes.io/component: feeds +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/version: {{ .Chart.AppVersion }} +app.kubernetes.io/part-of: anchore +app.kubernetes.io/managed-by: {{ .Release.Service }} +helm.sh/chart: {{ printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- with .Values.labels }} +{{ toYaml . }} +{{- end }} +{{- end -}} + +{{/* +Return anchore default selector match labels +*/}} +{{- define "feeds.common.matchLabels" -}} +app.kubernetes.io/name: {{ template "feeds.fullname" . }} +app.kubernetes.io/component: feeds +{{- end -}} diff --git a/stable/feeds/templates/_helpers.tpl b/stable/feeds/templates/_helpers.tpl new file mode 100644 index 00000000..05da1977 --- /dev/null +++ b/stable/feeds/templates/_helpers.tpl @@ -0,0 +1,51 @@ +{{/* +Create feeds database hostname string from supplied values file. Used for setting the ANCHORE_FEEDS_DB_HOST env var in the Feeds secret. +*/}} +{{- define "feeds.dbHostname" -}} +{{- if and (index .Values "feeds-db" "externalEndpoint") (not (index .Values "feeds-db" "enabled")) }} + {{- print ( index .Values "feeds-db" "externalEndpoint" ) }} +{{- else if and (index .Values "cloudsql" "enabled") (not (index .Values "feeds-db" "enabled")) }} + {{- print "localhost" }} +{{- else }} + {{- $db_host := include "feeds-db.fullname" . 
}} + {{- printf "%s" $db_host }} +{{- end }} +{{- end -}} + +{{/* +Allows sourcing of a specified file in the entrypoint of all containers when .Values.doSourceAtEntry.enabled = true +*/}} +{{- define "feeds.doSourceFile" -}} +{{- if .Values.doSourceAtEntry.enabled }} + {{- range $index, $file := .Values.doSourceAtEntry.filePaths }} + {{- printf "if [ -f %v ];then source %v;fi;" $file $file }} + {{- end }} +{{- end }} +{{- end -}} + +{{/* +Return the proper protocol when internal SSL is enabled +*/}} +{{- define "feeds.setProtocol" -}} +{{- if .Values.anchoreConfig.internalServicesSSL.enabled }} + {{- print "https" }} +{{- else }} + {{- print "http" }} +{{- end }} +{{- end -}} + +{{/* +Return a URL for the external feeds service +*/}} +{{- define "feeds.setGrypeProviderURL" -}} +{{- $grypeProviderFeedsExternalURL := "" }} +{{- $regexSearchPattern := (printf "/v2.*$" | toString) }} +{{- if .Values.url }} + {{- $urlPathSuffix := (default "" (regexFind $regexSearchPattern .Values.url) ) }} + {{- $anchoreFeedsHost := (trimSuffix $urlPathSuffix .Values.url) }} + {{- $grypeProviderFeedsExternalURL = (printf "%s/v2/" $anchoreFeedsHost) }} +{{- else }} + {{- $grypeProviderFeedsExternalURL = (printf "%s://%s:%s/v2/" (include "feeds.setProtocol" .) (include "feeds.fullname" .) (.Values.service.port | toString)) -}} +{{- end }} +{{- print $grypeProviderFeedsExternalURL }} +{{- end -}} diff --git a/stable/feeds/templates/_names.tpl b/stable/feeds/templates/_names.tpl new file mode 100644 index 00000000..b104ae40 --- /dev/null +++ b/stable/feeds/templates/_names.tpl @@ -0,0 +1,31 @@ +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} + +{{- define "feeds.fullname" -}} +{{- if .Values.fullnameOverride }} + {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} + {{- $name := default .Chart.Name .Values.nameOverride }} + {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} + +{{- define "feeds.upgradeJob.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- $forcedRevision := "" -}} +{{- if .Values.feedsUpgradeJob.force }} +{{- $forcedRevision = printf "-forced-%s" (randAlphaNum 5 | lower) -}} +{{- end }} +{{- printf "%s-%s-%s-%s%s" .Release.Name $name (.Chart.AppVersion | replace "." "") "upgrade" $forcedRevision | trunc 63 | trimSuffix "-" }} +{{- end -}} + +{{- define "feeds-db.fullname" -}} +{{- printf "%s-%s" .Release.Name "feeds-db" | trunc 63 | trimSuffix "-" }} +{{- end -}} + +{{- define "gem-db.fullname" -}} +{{- printf "%s-%s" .Release.Name "gem-db" | trunc 63 | trimSuffix "-" }} +{{- end -}} diff --git a/stable/feeds/templates/configmap.yaml b/stable/feeds/templates/configmap.yaml new file mode 100644 index 00000000..ae8f66c0 --- /dev/null +++ b/stable/feeds/templates/configmap.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "feeds.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "feeds.common.labels" . | nindent 4 }} + annotations: {{- include "feeds.common.annotations" . | nindent 4 }} +data: + config.yaml: | + # Anchore Feeds Service Configuration File, mounted from a configmap + # +{{- if .Values.configOverride }} +{{ toYaml .Values.configOverride | indent 4 }} +{{- else }} +{{ tpl (.Files.Get "files/default_config.yaml") . 
| indent 4 }} +{{- end }} diff --git a/stable/feeds/templates/deployment.yaml b/stable/feeds/templates/deployment.yaml new file mode 100644 index 00000000..82a7bf43 --- /dev/null +++ b/stable/feeds/templates/deployment.yaml @@ -0,0 +1,199 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "feeds.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "feeds.common.labels" . | nindent 4 }} + annotations: {{- include "feeds.common.annotations" . | nindent 4 }} +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: {{- include "feeds.common.matchLabels" . | nindent 6 }} + template: + metadata: + labels: {{- include "feeds.common.labels" . | nindent 8 }} + annotations: {{- include "feeds.common.annotations" (merge (dict "nil" true) .) | nindent 8 }} + {{- if not .Values.injectSecretsViaEnv }} + checksum/secrets: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- end }} + checksum/feeds-config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + spec: + {{- with .Values.securityContext }} + securityContext: {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.serviceAccountName }} + serviceAccountName: {{ . }} + {{- end }} + {{- with .Values.imagePullSecretName }} + imagePullSecrets: + - name: {{ . 
}} + {{- end }} + {{- if and (or .Values.scratchVolume.fixGroupPermissions .Values.persistence.fixGroupPermissions) .Values.securityContext.fsGroup }} + initContainers: + - name: mode-fixer + image: {{ .Values.scratchVolume.fixerInitContainerImage }} + securityContext: + runAsUser: 0 + volumeMounts: + {{- if .Values.scratchVolume.fixGroupPermissions }} + - name: "anchore-scratch" + mountPath: {{ .Values.scratchVolume.mountPath }} + {{- end }} + {{- if .Values.persistence.fixGroupPermissions }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + command: ["/bin/sh", "-c"] + args: + {{- if .Values.scratchVolume.fixGroupPermissions }} + - (chmod 0775 {{ .Values.scratchVolume.mountPath }}; chgrp {{ .Values.securityContext.fsGroup }} {{ .Values.scratchVolume.mountPath }} ) + {{- end }} + {{- if .Values.persistence.fixGroupPermissions }} + - (chmod 0775 {{ .Values.persistence.mountPath }}; chgrp {{ .Values.securityContext.fsGroup }} {{ .Values.persistence.mountPath }} ) + {{- end }} + {{- end }} + containers: + {{- if .Values.cloudsql.enabled }} + {{- include "feeds.common.cloudsqlContainer" . | nindent 8 }} + {{- end }} + - name: "{{ .Chart.Name }}-feeds" + image: {{ .Values.image }} + imagePullPolicy: {{ .Values.imagePullPolicy }} + {{- with .Values.containerSecurityContext }} + securityContext: + {{ toYaml . | nindent 12 }} + {{- end }} + command: ["/bin/sh", "-c"] + args: + - {{ print (include "feeds.doSourceFile" .) 
}} /docker-entrypoint.sh anchore-enterprise-manager service start --no-auto-upgrade feeds + ports: + - containerPort: {{ .Values.service.port }} + name: feeds-api + envFrom: + - configMapRef: + name: {{ .Release.Name }}-feeds-config-env-vars + {{- if not .Values.standalone }} + - configMapRef: + name: {{ .Release.Name }}-enterprise-config-env-vars + {{- end }} + {{- if not .Values.injectSecretsViaEnv }} + {{- if .Values.useExistingSecrets }} + - secretRef: + name: {{ .Values.existingSecretName }} + {{- else }} + - secretRef: + name: {{ template "feeds.fullname" . }} + {{- end }} + {{- end }} + env: {{- include "feeds.common.environment" . | nindent 12 }} + volumeMounts: + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + - name: config-volume + mountPath: /config/config.yaml + subPath: config.yaml + - name: "anchore-scratch" + mountPath: {{ .Values.scratchVolume.mountPath }} + - name: anchore-license + mountPath: /home/anchore/license.yaml + subPath: license.yaml + - name: anchore-scripts + mountPath: /scripts + {{- if .Values.certStoreSecretName }} + - name: certs + mountPath: /home/anchore/certs/ + readOnly: true + {{- end }} + {{- with .Values.extraVolumeMounts }} + {{- toYaml . | nindent 12 }} + {{- end }} + livenessProbe: + httpGet: + path: /health + port: feeds-api + scheme: {{ include "feeds.setProtocol" . | upper }} + initialDelaySeconds: {{ .Values.probes.liveness.initialDelaySeconds }} + timeoutSeconds: {{ .Values.probes.liveness.timeoutSeconds }} + periodSeconds: {{ .Values.probes.liveness.periodSeconds }} + failureThreshold: {{ .Values.probes.liveness.failureThreshold }} + successThreshold: {{ .Values.probes.liveness.successThreshold }} + readinessProbe: + httpGet: + path: /health + port: feeds-api + scheme: {{ include "feeds.setProtocol" . 
| upper }} + timeoutSeconds: {{ .Values.probes.readiness.timeoutSeconds }} + periodSeconds: {{ .Values.probes.readiness.periodSeconds }} + failureThreshold: {{ .Values.probes.readiness.failureThreshold }} + successThreshold: {{ .Values.probes.readiness.successThreshold }} + resources: {{- toYaml .Values.resources | nindent 12 }} + volumes: + - name: data + {{- if .Values.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ default (include "feeds.fullname" .) .Values.persistence.existingClaim }} + {{- else }} + emptyDir: {} + {{- end }} + - name: config-volume + configMap: + name: {{ template "feeds.fullname" . }} + - name: anchore-scripts + configMap: + name: {{ printf "%s-%s-scripts" .Release.Name (ternary "feeds" "enterprise" .Values.standalone) }} + defaultMode: 0755 + - name: "anchore-scratch" + {{- if .Values.scratchVolume.details }} + {{- toYaml .Values.scratchVolume.details | nindent 10 }} + {{- else }} + emptyDir: {} + {{- end }} + - name: anchore-license + secret: + secretName: {{ .Values.licenseSecretName }} + {{- if .Values.cloudsql.useExistingServiceAcc }} + - name: {{ .Values.cloudsql.serviceAccSecretName }} + secret: + secretName: {{ .Values.cloudsql.serviceAccSecretName }} + {{- end }} + {{- with .Values.certStoreSecretName }} + - name: certs + secret: + secretName: {{ . }} + {{- end }} + {{- with .Values.extraVolumes }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: {{- toYaml . | nindent 8 }} + {{- end }} + +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "feeds.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "feeds.common.labels" . | nindent 4 }} + annotations: {{- include "feeds.common.annotations" . 
| nindent 4 }} +spec: + type: {{ .Values.service.type }} + selector: {{- include "feeds.common.matchLabels" . | nindent 4 }} + ports: + - name: feeds-api + port: {{ .Values.service.port }} + targetPort: {{ .Values.service.port }} + protocol: TCP + {{- with .Values.service.nodePort }} + nodePort: {{ . }} + {{- end }} diff --git a/stable/feeds/templates/envvars_configmap.yaml b/stable/feeds/templates/envvars_configmap.yaml new file mode 100644 index 00000000..568a328c --- /dev/null +++ b/stable/feeds/templates/envvars_configmap.yaml @@ -0,0 +1,94 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: {{ .Release.Name }}-feeds-config-env-vars + namespace: {{ .Release.Namespace }} + labels: {{- include "feeds.common.labels" . | nindent 4 }} + annotations: {{- include "feeds.common.annotations" . | nindent 4 }} +data: + ANCHORE_AUTH_ENABLE_HASHED_PASSWORDS: "{{ .Values.anchoreConfig.user_authentication.hashed_passwords }}" +{{- with .Values.anchoreConfig.keys.publicKeyFileName }} + ANCHORE_AUTH_PRIVKEY: "/home/anchore/certs/{{- . }}" +{{- else }} + ANCHORE_AUTH_PRIVKEY: "null" +{{- end }} +{{- with .Values.anchoreConfig.keys.privateKeyFileName }} + ANCHORE_AUTH_PUBKEY: "/home/anchore/certs/{{- . 
}}" +{{- else }} + ANCHORE_AUTH_PUBKEY: "null" +{{- end }} + ANCHORE_CLI_URL: "http://localhost:8228" + ANCHORE_CLI_USER: "admin" + ANCHORECTL_URL: "http://localhost:8228" + ANCHORECTL_USERNAME: "admin" + ANCHORE_DISABLE_METRICS_AUTH: "{{ .Values.anchoreConfig.metrics.auth_disabled }}" + ANCHORE_ENABLE_METRICS: "{{ .Values.anchoreConfig.metrics.enabled }}" + ANCHORE_FEEDS_API_ONLY: "false" + ANCHORE_FEEDS_DB_POOL_MAX_OVERFLOW: "{{ .Values.anchoreConfig.database.db_pool_max_overflow }}" + ANCHORE_FEEDS_DB_POOL_SIZE: "{{ .Values.anchoreConfig.database.db_pool_size }}" + ANCHORE_FEEDS_DB_SSL: "{{ .Values.anchoreConfig.database.ssl }}" + ANCHORE_FEEDS_DB_SSL_MODE: "{{ .Values.anchoreConfig.database.sslMode }}" +{{- with .Values.anchoreConfig.database.sslRootCertFileName }} + ANCHORE_FEEDS_DB_SSL_ROOT_CERT: "/home/anchore/certs/{{- . }}" +{{- else }} + ANCHORE_FEEDS_DB_SSL_ROOT_CERT: "null" +{{- end }} + ANCHORE_FEEDS_DB_TIMEOUT: "{{ .Values.anchoreConfig.database.timeout }}" + ANCHORE_FEEDS_DRIVER_ALPINE_ENABLED: "true" + ANCHORE_FEEDS_DRIVER_AMAZON_ENABLED: "true" + ANCHORE_FEEDS_DRIVER_CHAINGUARD_ENABLED: "true" + ANCHORE_FEEDS_DRIVER_DEBIAN_ENABLED: "true" + ANCHORE_FEEDS_DRIVER_GEM_ENABLED: "{{ .Values.anchoreConfig.feeds.drivers.gem.enabled }}" + ANCHORE_FEEDS_DRIVER_GITHUB_ENABLED: "{{ .Values.anchoreConfig.feeds.drivers.github.enabled }}" + ANCHORE_FEEDS_DRIVER_GRYPEDB_ENABLED: "true" + ANCHORE_FEEDS_DRIVER_MARINER_ENABLED: "true" + ANCHORE_FEEDS_DRIVER_MATCH_EXCLUSIONS: "true" + ANCHORE_FEEDS_DRIVER_MSRC_ENABLED: "{{ .Values.anchoreConfig.feeds.drivers.msrc.enabled }}" + ANCHORE_FEEDS_DRIVER_NPM_ENABLED: "{{ .Values.anchoreConfig.feeds.drivers.npm.enabled }}" + ANCHORE_FEEDS_DRIVER_NVDV2_ENABLED: "true" + ANCHORE_FEEDS_DRIVER_OL_ENABLED: "true" + ANCHORE_FEEDS_DRIVER_RHEL_CONCURRENCY: "5" + ANCHORE_FEEDS_DRIVER_RHEL_ENABLED: "true" + ANCHORE_FEEDS_DRIVER_SLES_ENABLED: "true" + ANCHORE_FEEDS_DRIVER_UBUNTU_ENABLED: "true" + ANCHORE_FEEDS_DRIVER_UBUNTU_URL: 
"https://git.launchpad.net/ubuntu-cve-tracker" + ANCHORE_FEEDS_DRIVER_UBUNTU_BRANCH: "master" + ANCHORE_FEEDS_DRIVER_WOLFI_ENABLED: "true" + ANCHORE_FEEDS_EXTERNAL_URL: "{{ template "feeds.setGrypeProviderURL" . }}" + ANCHORE_FEEDS_LOCAL_WORKSPACE: "{{ .Values.persistence.mountPath }}" + ANCHORE_FEEDS_GRYPEDB_PRELOAD_ENABLED: "true" + ANCHORE_FEEDS_GRYPEDB_PRELOAD_PATH: "/preload/grype-db-workspace.tar.gz" + ANCHORE_FEEDS_GRYPEDB_PERSIST_WORKSPACE: "true" + ANCHORE_FEEDS_GRYPEDB_RESTORE_WORKSPACE: "true" + ANCHORE_FEEDS_PACKAGES_ENABLED: "false" + ANCHORE_FEEDS_TMP_DIR: "{{ .Values.scratchVolume.mountPath }}" + ANCHORE_FEEDS_WORKSPACE_PRELOAD: "true" + ANCHORE_GITHUB_TOKEN: "default-unset" + ANCHORE_GLOBAL_CLIENT_CONNECT_TIMEOUT: "0" + ANCHORE_GLOBAL_CLIENT_READ_TIMEOUT: "0" + ANCHORE_GLOBAL_SERVER_REQUEST_TIMEOUT_SEC: "180" + ANCHORE_INTERNAL_SSL_VERIFY: "{{ .Values.anchoreConfig.internalServicesSSL.verifyCerts }}" + ANCHORE_LICENSE_FILE: "/home/anchore/license.yaml" + ANCHORE_LOG_LEVEL: "{{ .Values.anchoreConfig.log_level }}" + ANCHORE_MAX_COMPRESSED_IMAGE_SIZE_MB: "-1" + ANCHORE_MAX_IMPORT_CONTENT_SIZE_MB: "100" + ANCHORE_MAX_IMPORT_SOURCE_SIZE_MB: "100" + ANCHORE_MAX_REQUEST_THREADS: "50" + ANCHORE_NVD_API_KEY: "null" + ANCHORE_OAUTH_ENABLED: "{{ .Values.anchoreConfig.user_authentication.oauth.enabled }}" + ANCHORE_OAUTH_TOKEN_EXPIRATION: "{{ .Values.anchoreConfig.user_authentication.oauth.default_token_expiration_seconds }}" + ANCHORE_OAUTH_REFRESH_TOKEN_EXPIRATION: "{{ .Values.anchoreConfig.user_authentication.oauth.refresh_token_expiration_seconds }}" + ANCHORE_SAML_SECRET: "null" + ANCHORE_SERVICE_DIR: "{{ .Values.anchoreConfig.service_dir }}" + ANCHORE_SSL_ENABLED: "{{ .Values.anchoreConfig.internalServicesSSL.enabled }}" +{{- with .Values.anchoreConfig.internalServicesSSL.certSecretCertFileName }} + ANCHORE_SSL_CERT: "/home/anchore/certs/{{- . 
}}" +{{- else }} + ANCHORE_SSL_CERT: "null" +{{- end }} +{{- with .Values.anchoreConfig.internalServicesSSL.certSecretKeyFileName }} + ANCHORE_SSL_KEY: "/home/anchore/certs/{{- . }}" +{{- else }} + ANCHORE_SSL_KEY: "null" +{{- end }} + ANCHORE_SSO_REQUIRES_EXISTING_USERS: "{{ .Values.anchoreConfig.user_authentication.sso_require_existing_users }}" diff --git a/stable/feeds/templates/hooks/post-upgrade/upgrade_job.yaml b/stable/feeds/templates/hooks/post-upgrade/upgrade_job.yaml new file mode 100644 index 00000000..44bdfaa4 --- /dev/null +++ b/stable/feeds/templates/hooks/post-upgrade/upgrade_job.yaml @@ -0,0 +1,116 @@ +{{- if and .Values.feedsUpgradeJob.enabled .Values.feedsUpgradeJob.usePostUpgradeHook -}} + +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "feeds.upgradeJob.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "feeds.common.labels" . | nindent 4 }} + annotations: {{- include "feeds.common.annotations" (merge (dict "nil" true) .) | nindent 4 }} + {{- if not .Values.feedsUpgradeJob.force }} + "helm.sh/hook": post-upgrade + "helm.sh/hook-weight": "0" + {{- end }} +spec: + template: + metadata: + name: {{ template "feeds.upgradeJob.fullname" . }} + labels: {{- include "feeds.common.labels" . | nindent 8 }} + annotations: {{- include "feeds.common.annotations" . | nindent 8 }} + spec: + {{- with .Values.securityContext }} + securityContext: {{- toYaml . | nindent 8 }} + {{- end }} + + {{- if .Values.feedsUpgradeJob.serviceAccountName }} + {{- with .Values.feedsUpgradeJob.serviceAccountName }} + serviceAccountName: {{ . }} + {{- end }} + {{- else if .Values.serviceAccountName }} + {{- with .Values.serviceAccountName }} + serviceAccountName: {{ . }} + {{- end }} + {{- end }} + + {{- with .Values.imagePullSecretName }} + imagePullSecrets: + - name: {{ . }} + {{- end }} + restartPolicy: Never + {{- with .Values.feedsUpgradeJob.nodeSelector }} + nodeSelector: {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- with .Values.feedsUpgradeJob.affinity }} + affinity: {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.feedsUpgradeJob.tolerations }} + tolerations: {{- toYaml . | nindent 8 }} + {{- end }} + {{- if or .Values.certStoreSecretName .Values.cloudsql.useExistingServiceAcc .Values.extraVolumes }} + volumes: + {{- with .Values.certStoreSecretName }} + - name: certs + secret: + secretName: {{ . }} + {{- end }} + {{- if .Values.cloudsql.useExistingServiceAcc }} + - name: {{ .Values.cloudsql.serviceAccSecretName }} + secret: + secretName: {{ .Values.cloudsql.serviceAccSecretName }} + {{- end }} + {{- with .Values.extraVolumes }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- end }} + containers: + {{- if .Values.cloudsql.enabled }} + {{- include "feeds.common.cloudsqlContainer" . | nindent 8 }} + {{- end }} + - name: feeds-upgrade + image: {{ .Values.image }} + imagePullPolicy: {{ .Values.imagePullPolicy }} + {{- with .Values.containerSecurityContext }} + securityContext: {{- toYaml . | nindent 12 }} + {{- end }} + command: ["/bin/bash", "-c"] + args: + {{- if not .Values.anchoreConfig.database.ssl }} + - | + {{ print (include "feeds.doSourceFile" .) }} anchore-enterprise-manager db --db-connect postgresql://"${ANCHORE_FEEDS_DB_USER}":"${ANCHORE_FEEDS_DB_PASSWORD}"@"${ANCHORE_FEEDS_DB_HOST}":"${ANCHORE_FEEDS_DB_PORT}"/"${ANCHORE_FEEDS_DB_NAME}" upgrade --dontask; + {{- else if eq .Values.anchoreConfig.database.sslMode "require" }} + - | + {{ print (include "feeds.doSourceFile" .) }} anchore-enterprise-manager db --db-use-ssl --db-connect postgresql://"${ANCHORE_FEEDS_DB_USER}":"${ANCHORE_FEEDS_DB_PASSWORD}"@"${ANCHORE_FEEDS_DB_HOST}":"${ANCHORE_FEEDS_DB_PORT}"/"${ANCHORE_FEEDS_DB_NAME}"?sslmode={{- .Values.anchoreConfig.database.sslMode }} upgrade --dontask; + {{- else }} + - | + {{ print (include "feeds.doSourceFile" .) 
}} anchore-enterprise-manager db --db-use-ssl --db-connect postgresql://"${ANCHORE_FEEDS_DB_USER}":"${ANCHORE_FEEDS_DB_PASSWORD}"@"${ANCHORE_FEEDS_DB_HOST}":"${ANCHORE_FEEDS_DB_PORT}"/"${ANCHORE_FEEDS_DB_NAME}"?sslmode={{- .Values.anchoreConfig.database.sslMode -}}\&sslrootcert=/home/anchore/certs/{{- .Values.anchoreConfig.database.sslRootCertFileName }} upgrade --dontask; + {{- end }} + envFrom: + - configMapRef: + name: {{ .Release.Name }}-feeds-config-env-vars + {{- if not .Values.standalone }} + - configMapRef: + name: {{ .Release.Name }}-enterprise-config-env-vars + {{- end }} + {{- if not .Values.injectSecretsViaEnv }} + {{- if .Values.useExistingSecrets }} + - secretRef: + name: {{ .Values.existingSecretName }} + {{- else }} + - secretRef: + name: {{ template "feeds.fullname" . }} + {{- end }} + {{- end }} + env: {{- include "feeds.common.environment" . | nindent 12 }} + volumeMounts: + {{- if (.Values.certStoreSecretName) }} + - name: certs + mountPath: /home/anchore/certs/ + readOnly: true + {{- end }} + {{- with .Values.extraVolumeMounts }} + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.feedsUpgradeJob.resources }} + resources: {{- toYaml . | nindent 12 }} + {{- end }} +{{- end -}} diff --git a/stable/feeds/templates/hooks/pre-upgrade/upgrade_job.yaml b/stable/feeds/templates/hooks/pre-upgrade/upgrade_job.yaml new file mode 100644 index 00000000..c7181f6b --- /dev/null +++ b/stable/feeds/templates/hooks/pre-upgrade/upgrade_job.yaml @@ -0,0 +1,161 @@ +{{- if and .Values.feedsUpgradeJob.enabled (not .Values.feedsUpgradeJob.usePostUpgradeHook) -}} + +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "feeds.upgradeJob.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "feeds.common.labels" . | nindent 4 }} + annotations: {{- include "feeds.common.annotations" (merge (dict "nil" true) .) 
| nindent 4 }} + {{- if not .Values.feedsUpgradeJob.force }} + "helm.sh/hook": pre-upgrade + "helm.sh/hook-weight": "3" + "helm.sh/hook-delete-policy": before-hook-creation + {{- end }} +spec: +{{- if ne (.Values.feedsUpgradeJob.ttlSecondsAfterFinished | quote) (-1 | quote) }} + ttlSecondsAfterFinished: {{ .Values.feedsUpgradeJob.ttlSecondsAfterFinished }} +{{- end }} + template: + metadata: + name: {{ template "feeds.upgradeJob.fullname" . }} + labels: {{- include "feeds.common.labels" . | nindent 8 }} + annotations: {{- include "feeds.common.annotations" . | nindent 8 }} + spec: + {{- with .Values.securityContext }} + securityContext: {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.feedsUpgradeJob.serviceAccountName }} + serviceAccountName: {{ .Values.feedsUpgradeJob.serviceAccountName }} + {{- else if .Values.feedsUpgradeJob.rbacCreate }} + serviceAccountName: {{ template "feeds.fullname" . }}-upgrade-sa + {{- else }} + serviceAccountName: {{ .Values.serviceAccountName }} + {{- end }} + {{- with .Values.imagePullSecretName }} + imagePullSecrets: + - name: {{ . }} + {{- end }} + restartPolicy: Never + {{- with (default .Values.nodeSelector .Values.feedsUpgradeJob.nodeSelector) }} + nodeSelector: {{- toYaml . | nindent 8 }} + {{- end }} + {{- with (default .Values.affinity .Values.feedsUpgradeJob.affinity) }} + affinity: {{- toYaml . | nindent 8 }} + {{- end }} + {{- with (default .Values.tolerations .Values.feedsUpgradeJob.tolerations) }} + tolerations: {{- toYaml . | nindent 8 }} + {{- end }} + {{- if or .Values.certStoreSecretName .Values.cloudsql.useExistingServiceAcc .Values.extraVolumes }} + volumes: + {{- with .Values.certStoreSecretName }} + - name: certs + secret: + secretName: {{ . }} + {{- end }} + {{- if .Values.cloudsql.useExistingServiceAcc }} + - name: {{ .Values.cloudsql.serviceAccSecretName }} + secret: + secretName: {{ .Values.cloudsql.serviceAccSecretName }} + {{- end }} + {{- with .Values.extraVolumes }} + {{- toYaml . 
| nindent 8 }}
+      {{- end }}
+      {{- end }}
+      initContainers:
+        {{- /* Scale all feeds deployments to zero and wait for their pods to terminate so the DB upgrade runs with no live writers. */}}
+        - name: scale-down-anchore
+          image: {{ .Values.feedsUpgradeJob.kubectlImage }}
+          command: ["/bin/bash", "-c"]
+          args:
+            - |
+              kubectl scale deployments --all --replicas=0 -l app.kubernetes.io/name={{ template "feeds.fullname" . }};
+              while [[ $(kubectl get pods -l app.kubernetes.io/name={{ template "feeds.fullname" . }} --field-selector=status.phase=Running --no-headers | tee /dev/stderr | wc -l) -gt 0 ]]; do
+                echo 'waiting for pods to go down...' && sleep 5;
+              done
+          {{- with .Values.containerSecurityContext }}
+          securityContext: {{ toYaml . | nindent 12 }}
+          {{- end }}
+          {{- with .Values.feedsUpgradeJob.resources }}
+          resources: {{- toYaml . | nindent 12 }}
+          {{- end }}
+        {{- /* Poll until the feeds DB accepts connections and passes pre-upgrade-check; stderr is captured (stdout discarded) and an empty capture means success. */}}
+        - name: wait-for-db
+          image: {{ .Values.image }}
+          imagePullPolicy: {{ .Values.imagePullPolicy }}
+          env: {{- include "feeds.common.environment" . | nindent 12 }}
+          command: ["/bin/bash", "-c"]
+          args:
+            - |
+              while true; do
+                CONNSTR=postgresql://"${ANCHORE_FEEDS_DB_USER}":"${ANCHORE_FEEDS_DB_PASSWORD}"@"${ANCHORE_FEEDS_DB_HOST}":"${ANCHORE_FEEDS_DB_PORT}"/"${ANCHORE_FEEDS_DB_NAME}"
+                if [[ "${ANCHORE_FEEDS_DB_SSL_MODE}" != null ]]; then
+                  CONNSTR=${CONNSTR}?sslmode=${ANCHORE_FEEDS_DB_SSL_MODE}
+                fi
+                if [[ "${ANCHORE_FEEDS_DB_SSL_ROOT_CERT}" != null ]]; then
+                  CONNSTR=${CONNSTR}\&sslrootcert=${ANCHORE_FEEDS_DB_SSL_ROOT_CERT}
+                fi
+                err=$(anchore-enterprise-manager db --db-connect ${CONNSTR} pre-upgrade-check 2>&1 > /dev/null)
+                if [[ -z "$err" ]]; then
+                  echo "Database is ready"
+                  exit 0
+                fi
+                echo "Database is not ready yet, sleeping 10 seconds..."
+                sleep 10
+              done
+          {{- with .Values.containerSecurityContext }}
+          securityContext: {{ toYaml . | nindent 12 }}
+          {{- end }}
+          {{- with .Values.feedsUpgradeJob.resources }}
+          resources: {{- toYaml . | nindent 12 }}
+          {{- end }}
+      containers:
+      {{- if .Values.cloudsql.enabled }}
+      {{- include "feeds.common.cloudsqlContainer" . 
| nindent 8 }} + {{- end }} + - name: upgrade-feeds-db + image: {{ .Values.image }} + imagePullPolicy: {{ .Values.imagePullPolicy }} + {{- with .Values.containerSecurityContext }} + securityContext: {{ toYaml . | nindent 12 }} + {{- end }} + envFrom: + - configMapRef: + name: {{ .Release.Name }}-feeds-config-env-vars + {{- if not .Values.standalone }} + - configMapRef: + name: {{ .Release.Name }}-enterprise-config-env-vars + {{- end }} + {{- if not .Values.injectSecretsViaEnv }} + {{- if .Values.useExistingSecrets }} + - secretRef: + name: {{ .Values.existingSecretName }} + {{- else }} + - secretRef: + name: {{ template "feeds.fullname" . }} + {{- end }} + {{- end }} + env: {{- include "feeds.common.environment" . | nindent 12 }} + volumeMounts: + {{- if .Values.certStoreSecretName }} + - name: certs + mountPath: /home/anchore/certs/ + readOnly: true + {{- end }} + {{- with .Values.extraVolumeMounts }} + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.feedsUpgradeJob.resources }} + resources: {{- toYaml . | nindent 12 }} + {{- end }} + command: ["/bin/bash", "-c"] + args: + {{- if not .Values.anchoreConfig.database.ssl }} + - | + {{ print (include "feeds.doSourceFile" .) }} anchore-enterprise-manager db --db-connect postgresql://"${ANCHORE_FEEDS_DB_USER}":"${ANCHORE_FEEDS_DB_PASSWORD}"@"${ANCHORE_FEEDS_DB_HOST}":"${ANCHORE_FEEDS_DB_PORT}"/"${ANCHORE_FEEDS_DB_NAME}" upgrade --dontask; + {{- else if eq .Values.anchoreConfig.database.sslMode "require" }} + - | + {{ print (include "feeds.doSourceFile" .) }} anchore-enterprise-manager db --db-use-ssl --db-connect postgresql://"${ANCHORE_FEEDS_DB_USER}":"${ANCHORE_FEEDS_DB_PASSWORD}"@"${ANCHORE_FEEDS_DB_HOST}":"${ANCHORE_FEEDS_DB_PORT}"/"${ANCHORE_FEEDS_DB_NAME}"?sslmode={{- .Values.anchoreConfig.database.sslMode }} upgrade --dontask; + {{- else }} + - | + {{ print (include "feeds.doSourceFile" .) 
}} anchore-enterprise-manager db --db-use-ssl --db-connect postgresql://"${ANCHORE_FEEDS_DB_USER}":"${ANCHORE_FEEDS_DB_PASSWORD}"@"${ANCHORE_FEEDS_DB_HOST}":"${ANCHORE_FEEDS_DB_PORT}"/"${ANCHORE_FEEDS_DB_NAME}"?sslmode={{- .Values.anchoreConfig.database.sslMode -}}\&sslrootcert=/home/anchore/certs/{{- .Values.anchoreConfig.database.sslRootCertFileName }} upgrade --dontask; + {{- end }} +{{- end -}} diff --git a/stable/feeds/templates/hooks/pre-upgrade/upgrade_rbac.yaml b/stable/feeds/templates/hooks/pre-upgrade/upgrade_rbac.yaml new file mode 100644 index 00000000..f7f55279 --- /dev/null +++ b/stable/feeds/templates/hooks/pre-upgrade/upgrade_rbac.yaml @@ -0,0 +1,75 @@ +{{- if and .Values.feedsUpgradeJob.enabled .Values.feedsUpgradeJob.rbacCreate (not .Values.feedsUpgradeJob.usePostUpgradeHook) -}} + +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "feeds.fullname" . }}-upgrade-sa + namespace: {{ .Release.Namespace }} + labels: {{- include "feeds.common.labels" . | nindent 4 }} + annotations: {{- include "feeds.common.annotations" (merge (dict "nil" true) .) | nindent 4 }} + {{- if not .Values.feedsUpgradeJob.force }} + "helm.sh/hook": pre-upgrade + "helm.sh/hook-weight": "1" + {{- end }} + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "feeds.fullname" . }}-upgrade-role-binding + namespace: {{ .Release.Namespace }} + labels: {{- include "feeds.common.labels" . | nindent 4 }} + annotations: {{- include "feeds.common.annotations" (merge (dict "nil" true) .) | nindent 4 }} + {{- if not .Values.feedsUpgradeJob.force }} + "helm.sh/hook": pre-upgrade + "helm.sh/hook-weight": "1" + {{- end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "feeds.fullname" . }}-upgrade-role +subjects: + - kind: ServiceAccount + name: {{ template "feeds.fullname" . 
}}-upgrade-sa + namespace: {{ .Release.Namespace }} + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "feeds.fullname" . }}-upgrade-role + namespace: {{ .Release.Namespace }} + labels: {{- include "feeds.common.labels" . | nindent 4 }} + annotations: {{- include "feeds.common.annotations" (merge (dict "nil" true) .) | nindent 4 }} + {{- if not .Values.feedsUpgradeJob.force }} + "helm.sh/hook": pre-upgrade + "helm.sh/hook-weight": "1" + {{- end }} +rules: + - apiGroups: + - extensions + - apps + resources: + - deployments + verbs: + - get + - list + - watch + - update + - patch + - apiGroups: + - apps + resources: + - deployments/scale + verbs: + - patch + - apiGroups: + - "" + resources: + - pods + verbs: + - watch + - list + - get +{{- end -}} diff --git a/stable/feeds/templates/ingress.yaml b/stable/feeds/templates/ingress.yaml new file mode 100644 index 00000000..4e98d14f --- /dev/null +++ b/stable/feeds/templates/ingress.yaml @@ -0,0 +1,78 @@ +{{- if .Values.ingress.enabled -}} +{{- $component := "ingress" -}} + +{{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" }} +apiVersion: networking.k8s.io/v1 +{{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1/Ingress" }} +apiVersion: networking.k8s.io/v1beta1 +{{- else }} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ template "feeds.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "feeds.common.labels" . | nindent 4 }} + {{- with .Values.ingress.labels }} + {{- . | toYaml | nindent 4 }} + {{- end }} + annotations: {{- include "feeds.common.annotations" (merge (dict "nil" true) .) | nindent 4 }} + {{- with .Values.ingress.annotations }} + {{- . 
| toYaml | nindent 4 }} + {{- end }} +spec: +{{- if .Values.ingress.ingressClassName }} + ingressClassName: {{ .Values.ingress.ingressClassName }} +{{- end }} +{{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- end }} + rules: + {{- if .Values.ingress.hosts }} + {{- range $hostIndex, $host := .Values.ingress.hosts }} + - host: {{ $host | quote }} + http: + paths: + {{- range $pathIndex, $path := $.Values.ingress.paths }} + - path: {{ $path }} + {{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" }} + pathType: Prefix + backend: + service: + name: {{ template "feeds.fullname" $ }} + port: + number: {{ $.Values.service.port }} + {{- else }} + backend: + serviceName: {{ template "feeds.fullname" $ }} + servicePort: {{ $.Values.service.port }} + {{- end }} + {{- end }} + {{- end }} + {{- else }} + - http: + paths: + {{- range .Values.ingress.paths }} + - path: {{ . }} + {{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" }} + pathType: Prefix + backend: + service: + name: {{ template "feeds.fullname" $ }} + port: + number: {{ $.Values.service.port }} + {{- else }} + backend: + serviceName: {{ template "feeds.fullname" $ }} + servicePort: {{ $.Values.service.port }} + {{- end }} + {{- end }} + {{- end }} +{{- end -}} diff --git a/stable/feeds/templates/pvc.yaml b/stable/feeds/templates/pvc.yaml new file mode 100644 index 00000000..5cc7978a --- /dev/null +++ b/stable/feeds/templates/pvc.yaml @@ -0,0 +1,26 @@ +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) -}} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ template "feeds.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "feeds.common.labels" . | nindent 4 }} + annotations: {{- include "feeds.common.annotations" (merge (dict "nil" true) .) 
| nindent 4 }}
+    "helm.sh/resource-policy": {{ default "" .Values.persistence.resourcePolicy }}
+    {{- /* nindent keeps every user-supplied annotation key at the same 4-space depth as its siblings. */}}
+    {{- with .Values.persistence.annotations }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+spec:
+  accessModes:
+    - {{ .Values.persistence.accessMode | quote }}
+  resources:
+    requests:
+      storage: {{ .Values.persistence.size | quote }}
+{{- if .Values.persistence.storageClass }}
+  {{- if (eq "-" .Values.persistence.storageClass) }}
+  storageClassName: ""
+  {{- else }}
+  storageClassName: "{{ .Values.persistence.storageClass }}"
+  {{- end }}
+{{- end }}
+{{- end -}}
diff --git a/stable/feeds/templates/scripts_configmap.yaml b/stable/feeds/templates/scripts_configmap.yaml
new file mode 100644
index 00000000..f65aac75
--- /dev/null
+++ b/stable/feeds/templates/scripts_configmap.yaml
+{{- if .Values.standalone }}
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: {{ .Release.Name }}-feeds-scripts
+  namespace: {{ .Release.Namespace }}
+  labels: {{- include "feeds.common.labels" . | nindent 4 }}
+  annotations: {{- include "feeds.common.annotations" . | nindent 4 }}
+data:
+  {{- .Values.scripts | toYaml | nindent 2 }}
+{{- end }}
diff --git a/stable/feeds/templates/secret.yaml b/stable/feeds/templates/secret.yaml
new file mode 100644
index 00000000..158c3ccd
--- /dev/null
+++ b/stable/feeds/templates/secret.yaml
+{{- if not .Values.useExistingSecrets }}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ template "feeds.fullname" . }}
+  namespace: {{ .Release.Namespace }}
+  labels: {{- include "feeds.common.labels" . | nindent 4 }}
+  annotations: {{- include "feeds.common.annotations" . | nindent 4 }}
+type: Opaque
+stringData:
+  ANCHORE_FEEDS_DB_NAME: {{ index .Values "feeds-db" "auth" "database" | quote }}
+  ANCHORE_FEEDS_DB_USER: {{ index .Values "feeds-db" "auth" "username" | quote }}
+  ANCHORE_FEEDS_DB_PASSWORD: {{ index .Values "feeds-db" "auth" "password" | quote }}
+  ANCHORE_FEEDS_DB_HOST: {{ include "feeds.dbHostname" . 
| quote }} + ANCHORE_FEEDS_DB_PORT: {{ index .Values "feeds-db" "primary" "service" "ports" "postgresql" | quote }} +{{- with .Values.anchoreConfig.keys.secret }} + ANCHORE_SAML_SECRET: {{ . | quote }} +{{- end }} +{{- if .Values.anchoreConfig.feeds.drivers.github.enabled }} +{{- with .Values.anchoreConfig.feeds.drivers.github.token }} + ANCHORE_GITHUB_TOKEN: {{ . | quote }} +{{- end }} +{{- end }} +{{- with .Values.anchoreConfig.feeds.drivers.nvdv2.api_key }} + ANCHORE_NVD_API_KEY: {{ . | quote }} +{{- end }} +{{- if .Values.anchoreConfig.feeds.drivers.gem.enabled }} + ANCHORE_GEM_DB_HOST: {{ default (include "gem-db.fullname" .) (index .Values "gem-db" "externalEndpoint") | quote }} + ANCHORE_GEM_DB_NAME: {{ index .Values "gem-db" "auth" "database" | quote }} + ANCHORE_GEM_DB_USER: {{ index .Values "gem-db" "auth" "username" | quote }} + ANCHORE_GEM_DB_PASSWORD: {{ index .Values "gem-db" "auth" "password" | quote }} + ANCHORE_GEM_DB_PORT: {{ index .Values "gem-db" "primary" "service" "ports" "postgresql" | quote }} +{{- end }} +{{- end -}} diff --git a/stable/feeds/tests/__snapshot__/configmap_test.yaml.snap b/stable/feeds/tests/__snapshot__/configmap_test.yaml.snap new file mode 100644 index 00000000..3a3ff1ce --- /dev/null +++ b/stable/feeds/tests/__snapshot__/configmap_test.yaml.snap @@ -0,0 +1,224 @@ +should render the configmaps: + 1: | + apiVersion: v1 + data: + config.yaml: | + # Anchore Feeds Service Configuration File, mounted from a configmap + # + service_dir: ${ANCHORE_SERVICE_DIR} + tmp_dir: ${ANCHORE_FEEDS_TMP_DIR} + log_level: ${ANCHORE_LOG_LEVEL} + + host_id: "${ANCHORE_HOST_ID}" + internal_ssl_verify: ${ANCHORE_INTERNAL_SSL_VERIFY} + + global_client_connect_timeout: ${ANCHORE_GLOBAL_CLIENT_CONNECT_TIMEOUT} + global_client_read_timeout: ${ANCHORE_GLOBAL_CLIENT_READ_TIMEOUT} + server_request_timeout_seconds: ${ANCHORE_GLOBAL_SERVER_REQUEST_TIMEOUT_SEC} + + license_file: ${ANCHORE_LICENSE_FILE} + auto_restart_services: false + + 
max_source_import_size_mb: ${ANCHORE_MAX_IMPORT_SOURCE_SIZE_MB} + max_import_content_size_mb: ${ANCHORE_MAX_IMPORT_CONTENT_SIZE_MB} + max_compressed_image_size_mb: ${ANCHORE_MAX_COMPRESSED_IMAGE_SIZE_MB} + + metrics: + enabled: ${ANCHORE_ENABLE_METRICS} + auth_disabled: true + + keys: + secret: ${ANCHORE_SAML_SECRET} + public_key_path: ${ANCHORE_AUTH_PRIVKEY} + private_key_path: ${ANCHORE_AUTH_PUBKEY} + + user_authentication: + hashed_passwords: ${ANCHORE_AUTH_ENABLE_HASHED_PASSWORDS} + sso_require_existing_users: ${ANCHORE_SSO_REQUIRES_EXISTING_USERS} + oauth: + enabled: ${ANCHORE_OAUTH_ENABLED} + default_token_expiration_seconds: ${ANCHORE_OAUTH_TOKEN_EXPIRATION} + refresh_token_expiration_seconds: ${ANCHORE_OAUTH_REFRESH_TOKEN_EXPIRATION} + + credentials: + database: + user: "${ANCHORE_FEEDS_DB_USER}" + password: "${ANCHORE_FEEDS_DB_PASSWORD}" + host: "${ANCHORE_FEEDS_DB_HOST}" + port: "${ANCHORE_FEEDS_DB_PORT}" + name: "${ANCHORE_FEEDS_DB_NAME}" + db_connect_args: + timeout: ${ANCHORE_FEEDS_DB_TIMEOUT} + ssl: ${ANCHORE_FEEDS_DB_SSL} + db_pool_size: ${ANCHORE_FEEDS_DB_POOL_SIZE} + db_pool_max_overflow: ${ANCHORE_FEEDS_DB_POOL_MAX_OVERFLOW} + + services: + feeds: + enabled: true + require_auth: true + endpoint_hostname: ${ANCHORE_ENDPOINT_HOSTNAME} + listen: '0.0.0.0' + port: ${ANCHORE_PORT} + max_request_threads: ${ANCHORE_MAX_REQUEST_THREADS} + ssl_enable: ${ANCHORE_SSL_ENABLED} + ssl_cert: ${ANCHORE_SSL_CERT} + ssl_key: ${ANCHORE_SSL_KEY} + cycle_timers: + driver_sync: 7200 + local_workspace: ${ANCHORE_FEEDS_LOCAL_WORKSPACE} + workspace_preload: + enabled: ${ANCHORE_FEEDS_WORKSPACE_PRELOAD} + workspace_preload_file: ${ANCHORE_FEEDS_LOCAL_WORKSPACE}/data.tar.gz + api_only: ${ANCHORE_FEEDS_API_ONLY} + drivers: + amzn: + enabled: ${ANCHORE_FEEDS_DRIVER_AMAZON_ENABLED} + alpine: + enabled: ${ANCHORE_FEEDS_DRIVER_ALPINE_ENABLED} + centos: + enabled: false + debian: + enabled: ${ANCHORE_FEEDS_DRIVER_DEBIAN_ENABLED} + ol: + enabled: ${ANCHORE_FEEDS_DRIVER_OL_ENABLED} 
+ ubuntu: + enabled: ${ANCHORE_FEEDS_DRIVER_UBUNTU_ENABLED} + git_url: ${ANCHORE_FEEDS_DRIVER_UBUNTU_URL} + git_branch: ${ANCHORE_FEEDS_DRIVER_UBUNTU_BRANCH} + rhel: + enabled: ${ANCHORE_FEEDS_DRIVER_RHEL_ENABLED} + concurrency: ${ANCHORE_FEEDS_DRIVER_RHEL_CONCURRENCY} + npm: + enabled: ${ANCHORE_FEEDS_DRIVER_NPM_ENABLED} + gem: + enabled: ${ANCHORE_FEEDS_DRIVER_GEM_ENABLED} + nvdv2: + enabled: ${ANCHORE_FEEDS_DRIVER_NVDV2_ENABLED} + api_key: ${ANCHORE_NVD_API_KEY} + mariner: + enabled: ${ANCHORE_FEEDS_DRIVER_MARINER_ENABLED} + msrc: + enabled: ${ANCHORE_FEEDS_DRIVER_MSRC_ENABLED} + github: + enabled: ${ANCHORE_FEEDS_DRIVER_GITHUB_ENABLED} + token: ${ANCHORE_GITHUB_TOKEN} + grypedb: + enabled: ${ANCHORE_FEEDS_DRIVER_GRYPEDB_ENABLED} + external_feeds_url: ${ANCHORE_FEEDS_EXTERNAL_URL} + preload: + enabled: ${ANCHORE_FEEDS_GRYPEDB_PRELOAD_ENABLED} + workspace_archive_path: ${ANCHORE_FEEDS_GRYPEDB_PRELOAD_PATH} + persist_provider_workspaces: ${ANCHORE_FEEDS_GRYPEDB_PERSIST_WORKSPACE} + restore_provider_workspaces: ${ANCHORE_FEEDS_GRYPEDB_RESTORE_WORKSPACE} + sles: + enabled: ${ANCHORE_FEEDS_DRIVER_SLES_ENABLED} + anchore_match_exclusions: + enabled: ${ANCHORE_FEEDS_DRIVER_MATCH_EXCLUSIONS} + wolfi: + enabled: ${ANCHORE_FEEDS_DRIVER_WOLFI_ENABLED} + chainguard: + enabled: ${ANCHORE_FEEDS_DRIVER_CHAINGUARD_ENABLED} + kind: ConfigMap + metadata: + annotations: + bar: baz-annotation + foo: bar-annotation + labels: + app.kubernetes.io/component: feeds + app.kubernetes.io/instance: test-release + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: test-release-feeds + app.kubernetes.io/part-of: anchore + app.kubernetes.io/version: 9.9.9 + bar: baz-label + foo: bar-label + helm.sh/chart: feeds-9.9.9 + name: test-release-feeds + namespace: test-namespace + 2: | + apiVersion: v1 + data: + ANCHORE_AUTH_ENABLE_HASHED_PASSWORDS: "false" + ANCHORE_AUTH_PRIVKEY: "null" + ANCHORE_AUTH_PUBKEY: "null" + ANCHORE_CLI_URL: http://localhost:8228 + ANCHORE_CLI_USER: admin + 
ANCHORE_DISABLE_METRICS_AUTH: "false" + ANCHORE_ENABLE_METRICS: "false" + ANCHORE_FEEDS_API_ONLY: "false" + ANCHORE_FEEDS_DB_POOL_MAX_OVERFLOW: "100" + ANCHORE_FEEDS_DB_POOL_SIZE: "30" + ANCHORE_FEEDS_DB_SSL: "false" + ANCHORE_FEEDS_DB_SSL_MODE: require + ANCHORE_FEEDS_DB_SSL_ROOT_CERT: "null" + ANCHORE_FEEDS_DB_TIMEOUT: "120" + ANCHORE_FEEDS_DRIVER_ALPINE_ENABLED: "true" + ANCHORE_FEEDS_DRIVER_AMAZON_ENABLED: "true" + ANCHORE_FEEDS_DRIVER_CHAINGUARD_ENABLED: "true" + ANCHORE_FEEDS_DRIVER_DEBIAN_ENABLED: "true" + ANCHORE_FEEDS_DRIVER_GEM_ENABLED: "false" + ANCHORE_FEEDS_DRIVER_GITHUB_ENABLED: "false" + ANCHORE_FEEDS_DRIVER_GRYPEDB_ENABLED: "true" + ANCHORE_FEEDS_DRIVER_MARINER_ENABLED: "true" + ANCHORE_FEEDS_DRIVER_MATCH_EXCLUSIONS: "true" + ANCHORE_FEEDS_DRIVER_MSRC_ENABLED: "false" + ANCHORE_FEEDS_DRIVER_NPM_ENABLED: "false" + ANCHORE_FEEDS_DRIVER_NVDV2_ENABLED: "true" + ANCHORE_FEEDS_DRIVER_OL_ENABLED: "true" + ANCHORE_FEEDS_DRIVER_RHEL_CONCURRENCY: "5" + ANCHORE_FEEDS_DRIVER_RHEL_ENABLED: "true" + ANCHORE_FEEDS_DRIVER_SLES_ENABLED: "true" + ANCHORE_FEEDS_DRIVER_UBUNTU_BRANCH: master + ANCHORE_FEEDS_DRIVER_UBUNTU_ENABLED: "true" + ANCHORE_FEEDS_DRIVER_UBUNTU_URL: https://git.launchpad.net/ubuntu-cve-tracker + ANCHORE_FEEDS_DRIVER_WOLFI_ENABLED: "true" + ANCHORE_FEEDS_EXTERNAL_URL: http://test-release-feeds:8448/v2/ + ANCHORE_FEEDS_GRYPEDB_PERSIST_WORKSPACE: "true" + ANCHORE_FEEDS_GRYPEDB_PRELOAD_ENABLED: "true" + ANCHORE_FEEDS_GRYPEDB_PRELOAD_PATH: /preload/grype-db-workspace.tar.gz + ANCHORE_FEEDS_GRYPEDB_RESTORE_WORKSPACE: "true" + ANCHORE_FEEDS_LOCAL_WORKSPACE: /workspace + ANCHORE_FEEDS_PACKAGES_ENABLED: "false" + ANCHORE_FEEDS_TMP_DIR: /anchore_scratch + ANCHORE_FEEDS_WORKSPACE_PRELOAD: "true" + ANCHORE_GITHUB_TOKEN: default-unset + ANCHORE_GLOBAL_CLIENT_CONNECT_TIMEOUT: "0" + ANCHORE_GLOBAL_CLIENT_READ_TIMEOUT: "0" + ANCHORE_GLOBAL_SERVER_REQUEST_TIMEOUT_SEC: "180" + ANCHORE_INTERNAL_SSL_VERIFY: "false" + ANCHORE_LICENSE_FILE: /home/anchore/license.yaml + 
ANCHORE_LOG_LEVEL: INFO + ANCHORE_MAX_COMPRESSED_IMAGE_SIZE_MB: "-1" + ANCHORE_MAX_IMPORT_CONTENT_SIZE_MB: "100" + ANCHORE_MAX_IMPORT_SOURCE_SIZE_MB: "100" + ANCHORE_MAX_REQUEST_THREADS: "50" + ANCHORE_NVD_API_KEY: "null" + ANCHORE_OAUTH_ENABLED: "false" + ANCHORE_OAUTH_REFRESH_TOKEN_EXPIRATION: "86400" + ANCHORE_OAUTH_TOKEN_EXPIRATION: "3600" + ANCHORE_SAML_SECRET: "null" + ANCHORE_SERVICE_DIR: /anchore_service + ANCHORE_SSL_CERT: "null" + ANCHORE_SSL_ENABLED: "false" + ANCHORE_SSL_KEY: "null" + ANCHORE_SSO_REQUIRES_EXISTING_USERS: "false" + ANCHORECTL_URL: http://localhost:8228 + ANCHORECTL_USERNAME: admin + kind: ConfigMap + metadata: + annotations: + bar: baz-annotation + foo: bar-annotation + labels: + app.kubernetes.io/component: feeds + app.kubernetes.io/instance: test-release + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: test-release-feeds + app.kubernetes.io/part-of: anchore + app.kubernetes.io/version: 9.9.9 + bar: baz-label + foo: bar-label + helm.sh/chart: feeds-9.9.9 + name: test-release-feeds-config-env-vars + namespace: test-namespace diff --git a/stable/feeds/tests/__snapshot__/prehook_upgrade_resources_test.yaml.snap b/stable/feeds/tests/__snapshot__/prehook_upgrade_resources_test.yaml.snap new file mode 100644 index 00000000..b68b53ca --- /dev/null +++ b/stable/feeds/tests/__snapshot__/prehook_upgrade_resources_test.yaml.snap @@ -0,0 +1,148 @@ +rbac should match snapshot: + 1: | + apiVersion: v1 + kind: ServiceAccount + metadata: + annotations: + bar: baz-annotation + foo: bar-annotation + helm.sh/hook: pre-upgrade + helm.sh/hook-weight: "1" + labels: + app.kubernetes.io/component: feeds + app.kubernetes.io/instance: test-release + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: test-release-feeds + app.kubernetes.io/part-of: anchore + app.kubernetes.io/version: 9.9.9 + bar: baz-label + foo: bar-label + helm.sh/chart: feeds-9.9.9 + name: test-release-feeds-upgrade-sa + namespace: test-namespace + 2: | + apiVersion: 
rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + annotations: + bar: baz-annotation + foo: bar-annotation + helm.sh/hook: pre-upgrade + helm.sh/hook-weight: "1" + labels: + app.kubernetes.io/component: feeds + app.kubernetes.io/instance: test-release + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: test-release-feeds + app.kubernetes.io/part-of: anchore + app.kubernetes.io/version: 9.9.9 + bar: baz-label + foo: bar-label + helm.sh/chart: feeds-9.9.9 + name: test-release-feeds-upgrade-role-binding + namespace: test-namespace + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: test-release-feeds-upgrade-role + subjects: + - kind: ServiceAccount + name: test-release-feeds-upgrade-sa + namespace: test-namespace + 3: | + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + annotations: + bar: baz-annotation + foo: bar-annotation + helm.sh/hook: pre-upgrade + helm.sh/hook-weight: "1" + labels: + app.kubernetes.io/component: feeds + app.kubernetes.io/instance: test-release + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: test-release-feeds + app.kubernetes.io/part-of: anchore + app.kubernetes.io/version: 9.9.9 + bar: baz-label + foo: bar-label + helm.sh/chart: feeds-9.9.9 + name: test-release-feeds-upgrade-role + namespace: test-namespace + rules: + - apiGroups: + - extensions + - apps + resources: + - deployments + verbs: + - get + - list + - watch + - update + - patch + - apiGroups: + - apps + resources: + - deployments/scale + verbs: + - patch + - apiGroups: + - "" + resources: + - pods + verbs: + - watch + - list + - get +should render proper initContainers: + 1: | + - args: + - | + kubectl scale deployments --all --replicas=0 -l app.kubernetes.io/name=test-release-feeds; + while [[ $(kubectl get pods -l app.kubernetes.io/name=test-release-feeds --field-selector=status.phase=Running --no-headers | tee /dev/stderr | wc -l) -gt 0 ]]; do + echo 'waiting for pods to go down...' 
&& sleep 5;
+          done
+      command:
+        - /bin/bash
+        - -c
+      image: bitnami/kubectl:1.27
+      name: scale-down-anchore
+    - args:
+        - |
+          while true; do
+            CONNSTR=postgresql://"${ANCHORE_FEEDS_DB_USER}":"${ANCHORE_FEEDS_DB_PASSWORD}"@"${ANCHORE_FEEDS_DB_HOST}":"${ANCHORE_FEEDS_DB_PORT}"/"${ANCHORE_FEEDS_DB_NAME}"
+            if [[ "${ANCHORE_FEEDS_DB_SSL_MODE}" != null ]]; then
+              CONNSTR=${CONNSTR}?sslmode=${ANCHORE_FEEDS_DB_SSL_MODE}
+            fi
+            if [[ "${ANCHORE_FEEDS_DB_SSL_ROOT_CERT}" != null ]]; then
+              CONNSTR=${CONNSTR}\&sslrootcert=${ANCHORE_FEEDS_DB_SSL_ROOT_CERT}
+            fi
+            err=$(anchore-enterprise-manager db --db-connect ${CONNSTR} pre-upgrade-check 2>&1 > /dev/null)
+            if [[ -z "$err" ]]; then
+              echo "Database is ready"
+              exit 0
+            fi
+            echo "Database is not ready yet, sleeping 10 seconds..."
+            sleep 10
+          done
+      command:
+        - /bin/bash
+        - -c
+      env:
+        - name: foo
+          value: bar
+        - name: bar
+          value: baz
+        - name: ANCHORE_HOST_ID
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        - name: ANCHORE_ENDPOINT_HOSTNAME
+          value: test-release-feeds
+        - name: ANCHORE_PORT
+          value: "8448"
+      image: docker.io/anchore/enterprise:v5.4.0
+      imagePullPolicy: IfNotPresent
+      name: wait-for-db
diff --git a/stable/feeds/tests/common_helpers_test.yaml b/stable/feeds/tests/common_helpers_test.yaml
new file mode 100644
index 00000000..7893d7ab
--- /dev/null
+++ b/stable/feeds/tests/common_helpers_test.yaml
+suite: Common Helper Template Tests
+templates:
+  - envvars_configmap.yaml
+  - configmap.yaml
+  - deployment.yaml
+  - pvc.yaml
+  - secret.yaml
+  - ingress.yaml
+  - templates/hooks/pre-upgrade/upgrade_rbac.yaml
+  - templates/hooks/pre-upgrade/upgrade_job.yaml
+  - templates/hooks/post-upgrade/upgrade_job.yaml
+release:
+  name: test-release
+  namespace: test-namespace
+chart:
+  version: 9.9.9
+  appVersion: 9.9.9
+
+test_templates: &test_templates
+  - envvars_configmap.yaml
+  - configmap.yaml
+  - deployment.yaml
+  - pvc.yaml
+  - secret.yaml
+  - templates/hooks/pre-upgrade/upgrade_rbac.yaml
+  - 
templates/hooks/pre-upgrade/upgrade_job.yaml + +flag_enabled_test_templates: &flag_enabled_test_templates + - templates/hooks/post-upgrade/upgrade_job.yaml + - ingress.yaml + +tests: + - it: should render global annotations + templates: *test_templates + documentIndex: 0 + set: + annotations: + foo: bar + bar: baz + asserts: + - isSubset: + path: metadata.annotations + content: + foo: bar + bar: baz + + - it: should render file checksum/secret annotation + template: deployment.yaml + documentIndex: 0 + asserts: + - exists: + path: spec.template.metadata.annotations['checksum/secrets'] + + - it: should render global labels + templates: *test_templates + documentIndex: 0 + set: + labels: + foo: bar + bar: baz + asserts: + - isSubset: + path: metadata.labels + content: + app.kubernetes.io/instance: test-release + helm.sh/chart: feeds-9.9.9 + app.kubernetes.io/version: 9.9.9 + app.kubernetes.io/part-of: anchore + app.kubernetes.io/managed-by: Helm + foo: bar + bar: baz + + - it: should render docker entrypoint with no doSourceAtEntry filePaths + templates: + - deployment.yaml + documentIndex: 0 + asserts: + - matchRegex: + path: spec.template.spec.containers[0].args[0] + pattern: /docker-entrypoint.sh anchore-enterprise-manager service start --no-auto-upgrade + + - it: should render docker entrypoint with doSourceAtEntry and no filePaths + templates: + - deployment.yaml + documentIndex: 0 + set: + doSourceAtEntry.enabled: true + asserts: + - matchRegex: + path: spec.template.spec.containers[0].args[0] + pattern: ^/docker-entrypoint.sh anchore-enterprise-manager service start --no-auto-upgrade + + - it: should render docker entrypoint with doSourceAtEntry and some filePaths + templates: + - deployment.yaml + documentIndex: 0 + set: + doSourceAtEntry.enabled: true + doSourceAtEntry.filePaths: ["myscript.sh", "myotherscript.sh"] + asserts: + - matchRegex: + path: spec.template.spec.containers[0].args[0] + pattern: ^if \[ -f myscript\.sh \];then source myscript\.sh;fi;if 
\[ -f myotherscript\.sh \];then source myotherscript\.sh;fi; .*$ + + - it: should render envFrom without an existing secret + templates: + - deployment.yaml + - templates/hooks/pre-upgrade/upgrade_job.yaml + documentIndex: 0 + asserts: + - isSubset: + path: spec.template.spec.containers[0] + content: + envFrom: + - configMapRef: + name: test-release-feeds-config-env-vars + - secretRef: + name: test-release-feeds + + - it: should render envFrom with an existing secret + set: + useExistingSecrets: true + existingSecretName: "myExistingSecretName" + templates: + - deployment.yaml + - templates/hooks/pre-upgrade/upgrade_job.yaml + documentIndex: 0 + asserts: + - isSubset: + path: spec.template.spec.containers[0] + content: + envFrom: + - configMapRef: + name: test-release-feeds-config-env-vars + - secretRef: + name: myExistingSecretName + + - it: should not render secretRef when injecting secrets via env + set: + injectSecretsViaEnv: true + useExistingSecrets: true + existingSecretName: "myExistingSecretName" + templates: + - deployment.yaml + - templates/hooks/pre-upgrade/upgrade_job.yaml + documentIndex: 0 + asserts: + - isNotSubset: + path: spec.template.spec.containers[0] + content: + envFrom: + - configMapRef: + name: test-release-feeds-config-env-vars + - secretRef: + name: myExistingSecretName + + - it: should render correct environment variables when extraEnv is set + templates: + - deployment.yaml + - templates/hooks/pre-upgrade/upgrade_job.yaml + documentIndex: 0 + set: + extraEnv: + - name: foo + value: bar + - name: bar + value: baz + asserts: + - contains: + path: spec.template.spec.containers[0].env + content: + name: foo + value: bar + - contains: + path: spec.template.spec.containers[0].env + content: + name: bar + value: baz + - contains: + path: spec.template.spec.containers[0].env + content: + name: ANCHORE_HOST_ID + valueFrom: + fieldRef: + fieldPath: metadata.name + + - it: should render clouddsql container for all deployments and default upgrade 
job + templates: + - deployment.yaml + - templates/hooks/pre-upgrade/upgrade_job.yaml + documentIndex: 0 + set: + cloudsql.enabled: true + asserts: + - contains: + path: spec.template.spec.containers + content: + name: cloudsql-proxy + command: + - /cloud_sql_proxy + args: + - "-instances==tcp:5432" + count: 1 + any: true + + - it: should render clouddsql container and extraArgs for all deployments and default upgrade job + templates: + - deployment.yaml + - templates/hooks/pre-upgrade/upgrade_job.yaml + documentIndex: 0 + set: + cloudsql.enabled: true + cloudsql.extraArgs: + - myExtraArg + - myOtherExtraArg + asserts: + - equal: + path: spec.template.spec.containers[0].name + value: cloudsql-proxy + count: 1 + - equal: + path: spec.template.spec.containers[0].command + value: [ /cloud_sql_proxy ] + count: 1 + - equal: + path: spec.template.spec.containers[0].args + value: + - "-instances==tcp:5432" + - "myExtraArg" + - "myOtherExtraArg" + count: 1 + + - it: should render clouddsql container, additional arg, and volumeMount when useExistingServiceAcc is true for all deployments and default upgrade job + templates: + - deployment.yaml + - templates/hooks/pre-upgrade/upgrade_job.yaml + documentIndex: 0 + set: + cloudsql.enabled: true + cloudsql.extraArgs: + - myExtraArg + - myOtherExtraArg + cloudsql.useExistingServiceAcc: true + cloudsql.serviceAccSecretName: cloudsqlServiceAccSecretName + cloudsql.serviceAccJsonName: cloudsqlServiceAccJsonName + asserts: + - equal: + path: spec.template.spec.containers[0].name + value: cloudsql-proxy + count: 1 + - equal: + path: spec.template.spec.containers[0].args + value: + - "-instances==tcp:5432" + - "myExtraArg" + - "myOtherExtraArg" + - "-credential_file=/var/cloudsqlServiceAccSecretName/cloudsqlServiceAccJsonName" + count: 1 + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + mountPath: "/var/cloudsqlServiceAccSecretName" + name: "cloudsqlServiceAccSecretName" + readOnly: true + count: 1 + - 
contains: + path: spec.template.spec.volumes + content: + name: "cloudsqlServiceAccSecretName" + secret: + secretName: "cloudsqlServiceAccSecretName" + count: 1 + + - it: should render fixPermissionsInitContainer + set: + scratchVolume.fixGroupPermissions: true + securityContext.fsGroup: 9999 + templates: + - deployment.yaml + documentIndex: 0 + asserts: + - contains: + path: spec.template.spec.initContainers + content: + name: mode-fixer + volumeMounts: + - name: "anchore-scratch" + mountPath: /anchore_scratch + args: + - (chmod 0775 /anchore_scratch; chgrp 9999 /anchore_scratch ) + command: + - /bin/sh + - -c + count: 1 + any: true + + - it: should not render fixPermissionsInitContainer + set: + scratchVolume.fixGroupPermissions: false + securityContext.fsGroup: 9999 + templates: + - deployment.yaml + documentIndex: 0 + asserts: + - notExists: + path: spec.template.spec.initContainers + + - it: should render liveness probe + templates: + - deployment.yaml + documentIndex: 0 + asserts: + - isSubset: + path: spec.template.spec.containers[0].livenessProbe + content: + initialDelaySeconds: 120 + timeoutSeconds: 10 + periodSeconds: 10 + failureThreshold: 6 + successThreshold: 1 + + - it: should render readiness probe + templates: + - deployment.yaml + documentIndex: 0 + asserts: + - isSubset: + path: spec.template.spec.containers[0].readinessProbe + content: + timeoutSeconds: 10 + periodSeconds: 10 + failureThreshold: 3 + successThreshold: 1 + + - it: should render pod security contexts + set: + securityContext.runAsUser: 9999 + templates: + - deployment.yaml + documentIndex: 0 + asserts: + - isSubset: + path: spec.template.spec.securityContext + content: + runAsUser: 9999 + runAsGroup: 1000 + fsGroup: 1000 + + - it: should render pod serviceAccountName + set: + serviceAccountName: myServiceAccountName + templates: + - deployment.yaml + documentIndex: 0 + asserts: + - equal: + path: spec.template.spec.serviceAccountName + value: myServiceAccountName + + - it: should 
not render pod serviceAccountName if not defined + templates: + - deployment.yaml + documentIndex: 0 + asserts: + - notExists: + path: spec.template.spec.serviceAccountName + + - it: should render imagePullSecretName + templates: + - deployment.yaml + documentIndex: 0 + asserts: + - equal: + path: spec.template.spec.imagePullSecrets[0].name + value: anchore-enterprise-pullcreds + + - it: should render set imagePullSecretName + set: + imagePullSecretName: mysecret + templates: + - deployment.yaml + documentIndex: 0 + asserts: + - equal: + path: spec.template.spec.imagePullSecrets[0].name + value: mysecret + + - it: should render container security contexts + set: + containerSecurityContext.runAsUser: 9999 + containerSecurityContext.runAsGroup: 9998 + templates: + - deployment.yaml + documentIndex: 0 + asserts: + - isSubset: + path: spec.template.spec.containers[0].securityContext + content: + runAsUser: 9999 + runAsGroup: 9998 + + - it: should not render container security contexts if none set + templates: + - deployment.yaml + documentIndex: 0 + asserts: + - notExists: + path: spec.template.spec.containers[0].securityContext + + - it: should render the rbacAuthContainer + templates: + - deployment.yaml + documentIndex: 0 + asserts: + - contains: + path: spec.template.spec.containers + content: + args: + - /docker-entrypoint.sh anchore-enterprise-manager service start --no-auto-upgrade feeds + name: feeds-feeds + ports: + - containerPort: 8448 + name: feeds-api + any: true + + - it: should render volumeMounts + templates: + - deployment.yaml + documentIndex: 0 + asserts: + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: anchore-license + mountPath: /home/anchore/license.yaml + subPath: license.yaml + count: 1 + any: true + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: config-volume + mountPath: /config/config.yaml + subPath: config.yaml + count: 1 + any: true + + - it: should render 
extraVolumeMounts + set: + extraVolumeMounts: + - name: my-volume + mountPath: /my/path + subPath: my-subpath + - name: my-volume-two + mountPath: /my/other/path + subPath: my-other-subpath + templates: + - deployment.yaml + documentIndex: 0 + asserts: + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: my-volume + mountPath: /my/path + subPath: my-subpath + count: 1 + any: true + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: my-volume-two + mountPath: /my/other/path + subPath: my-other-subpath + count: 1 + any: true + - notContains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: certs + count: 1 + any: true + + - it: should render certStore volumeMount and volume + set: + certStoreSecretName: mycerts + templates: + - deployment.yaml + documentIndex: 0 + asserts: + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: certs + mountPath: /home/anchore/certs/ + readOnly: true + count: 1 + any: true + - contains: + path: spec.template.spec.volumes + content: + name: certs + secret: + secretName: mycerts + count: 1 + any: true + + - it: should render global extraVolumes + set: + extraVolumes: + - name: my-volume + emptyDir: {} + - name: my-volume-two + emptyDir: {} + templates: + - deployment.yaml + documentIndex: 0 + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: my-volume + emptyDir: {} + count: 1 + any: true + - contains: + path: spec.template.spec.volumes + content: + name: my-volume-two + emptyDir: {} + count: 1 + any: true + - notContains: + path: spec.template.spec.volumes + content: + name: certs + count: 1 + any: true + + - it: should render global volumes anchore-license, and config-volume + templates: + - deployment.yaml + documentIndex: 0 + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: anchore-license + secret: + secretName: anchore-enterprise-license + count: 1 + any: 
true + - contains: + path: spec.template.spec.volumes + content: + name: config-volume + configMap: + name: test-release-feeds + count: 1 + any: true + + - it: should render feeds.fullname + templates: *test_templates + documentIndex: 0 + asserts: + - matchRegex: + path: metadata.name + pattern: ^test-release-feeds + + - it: should render feeds.fullname with fullnameOverride for services + set: + fullnameOverride: my-fullname-override + templates: + - deployment.yaml + documentIndex: 1 + asserts: + - matchRegex: + path: spec.selector["app.kubernetes.io/name"] + pattern: ^my-fullname-override + + - it: should render feeds.fullname with nameOverride for services + set: + nameOverride: my-name-override + templates: + - deployment.yaml + documentIndex: 1 + asserts: + - matchRegex: + path: spec.selector["app.kubernetes.io/name"] + pattern: ^test-release-my-name-override + + - it: should render dbHostname for secret.yaml + templates: + - secret.yaml + documentIndex: 0 + asserts: + - isSubset: + path: stringData + content: + ANCHORE_FEEDS_DB_HOST: test-release-feeds-db + count: 1 + any: true + + - isSubset: + path: stringData + content: + ANCHORE_FEEDS_DB_NAME: anchore-feeds + count: 1 + any: true + + - isSubset: + path: stringData + content: + ANCHORE_FEEDS_DB_PASSWORD: anchore-postgres,123 + count: 1 + any: true + + - isSubset: + path: stringData + content: + ANCHORE_FEEDS_DB_PORT: "5432" + count: 1 + any: true + + - isSubset: + path: stringData + content: + ANCHORE_FEEDS_DB_USER: anchore-feeds + count: 1 + any: true + + # since postgresql is enabled, should still use the chart postgresql service name + - it: should render dbHostname with externalEndpoint defined and postgresql enabled + set: + feeds-db: + externalEndpoint: my-endpoint + enabled: true + cloudsql: + enabled: true + templates: + - secret.yaml + documentIndex: 0 + asserts: + - isSubset: + path: stringData + content: + ANCHORE_FEEDS_DB_HOST: test-release-feeds-db + count: 1 + any: true + - isNotSubset: + 
path: stringData + content: + ANCHORE_FEEDS_DB_HOST: localhost + + - it: should render dbHostname with externalEndpoint defined and postgresql disabled + set: + feeds-db: + externalEndpoint: my-endpoint + enabled: false + templates: + - secret.yaml + documentIndex: 0 + asserts: + - isSubset: + path: stringData + content: + ANCHORE_FEEDS_DB_HOST: my-endpoint + count: 1 + any: true + + - it: should render dbHostname with cloudsql enabled and postgresql disabled + set: + feeds-db: + enabled: false + cloudsql: + enabled: true + templates: + - secret.yaml + documentIndex: 0 + asserts: + - isSubset: + path: stringData + content: + ANCHORE_FEEDS_DB_HOST: localhost + count: 1 + any: true + + - it: should render default dbHostname with postgresql disabled, postgresql externalEndpoint not defined, and cloudsql disabled + set: + feeds-db: + enabled: false + externalEndpoint: "" + cloudsql: + enabled: false + templates: + - secret.yaml + documentIndex: 0 + asserts: + - isSubset: + path: stringData + content: + ANCHORE_FEEDS_DB_HOST: test-release-feeds-db + count: 1 + any: true + + - it: should set the correct protocol with anchoreConfig.internalServicesSSL.enabled to true for deployments + set: + anchoreConfig.internalServicesSSL.enabled: true + templates: + - deployment.yaml + documentIndex: 0 + asserts: + - isSubset: + path: spec.template.spec.containers[0].livenessProbe.httpGet + content: + scheme: HTTPS + + - it: should set the correct protocol with anchoreConfig.internalServicesSSL.enabled to false for deployments + set: + anchoreConfig.internalServicesSSL.enabled: false + templates: + - deployment.yaml + documentIndex: 0 + asserts: + - isSubset: + path: spec.template.spec.containers[0].livenessProbe.httpGet + content: + scheme: HTTP diff --git a/stable/feeds/tests/configmap_test.yaml b/stable/feeds/tests/configmap_test.yaml new file mode 100644 index 00000000..2b904064 --- /dev/null +++ b/stable/feeds/tests/configmap_test.yaml @@ -0,0 +1,51 @@ +suite: ConfigMap Tests 
+templates: + - configmap.yaml + - envvars_configmap.yaml +values: + - values.yaml +release: + name: test-release + namespace: test-namespace +chart: + version: 9.9.9 + appVersion: 9.9.9 +tests: + - it: should render the configmaps + asserts: + - matchSnapshot: {} + + - it: should render grype url correctly if .Values.url is set + template: envvars_configmap.yaml + set: + url: https://something/v2/my-grype/something + asserts: + - matchRegex: + path: data["ANCHORE_FEEDS_EXTERNAL_URL"] + pattern: https://something/v2/$ + + - it: should render grype url correctly if .Values.url is set to a v1 url + template: envvars_configmap.yaml + set: + url: https://something/v2/my-grype/something + asserts: + - matchRegex: + path: data["ANCHORE_FEEDS_EXTERNAL_URL"] + pattern: https://something/v2/$ + + - it: should render the default-unset string for the ANCHORE_GITHUB_TOKEN rather than "null" + template: envvars_configmap.yaml + asserts: + - equal: + path: data.ANCHORE_GITHUB_TOKEN + value: "default-unset" + + - it: should render the default-unset string in envvar even if GH token is set, since that is covered by the secrets + template: envvars_configmap.yaml + set: + feeds.drivers.github.enabled: true + feeds.drivers.github.token: foobar + asserts: + - equal: + path: data.ANCHORE_GITHUB_TOKEN + value: "default-unset" \ No newline at end of file diff --git a/stable/feeds/tests/feeds_resources_test.yaml b/stable/feeds/tests/feeds_resources_test.yaml new file mode 100644 index 00000000..bc5e43a9 --- /dev/null +++ b/stable/feeds/tests/feeds_resources_test.yaml @@ -0,0 +1,332 @@ +suite: Feeds Resources Tests +templates: + - deployment.yaml + - secret.yaml + - configmap.yaml +release: + name: test-release + namespace: test-namespace +chart: + version: 9.9.9 + appVersion: 9.9.9 +values: + - values.yaml + +tests: + - it: should render a valid feeds config file + template: configmap.yaml + asserts: + - matchRegex: + path: data["config.yaml"] + pattern: Anchore Feeds Service 
Configuration File + + - it: should set the correct resource names + asserts: + - equal: + path: metadata.name + value: test-release-feeds + + - it: should render component labels + template: deployment.yaml + documentIndex: 0 + asserts: + - isSubset: + path: metadata.labels + content: + bar: "baz-label" + foo: "bar-label" + - isSubset: + path: spec.template.metadata.labels + content: + bar: "baz-label" + foo: "bar-label" + + - it: should render component annotations + templates: + - deployment.yaml + documentIndex: 0 + asserts: + - isSubset: + path: metadata.annotations + content: + foo: "bar-annotation" + bar: "baz-annotation" + - isSubset: + path: spec.template.metadata.annotations + content: + foo: "bar-annotation" + bar: "baz-annotation" + + - it: should render component matchLabels + template: deployment.yaml + documentIndex: 0 + asserts: + - isSubset: + path: spec.selector.matchLabels + content: + app.kubernetes.io/name: test-release-feeds + app.kubernetes.io/component: feeds + + - it: should render component serviceAccountName + template: deployment.yaml + documentIndex: 0 + set: + serviceAccountName: feeds-test + asserts: + - equal: + path: spec.template.spec.serviceAccountName + value: feeds-test + + - it: should render component nodeSelector + template: deployment.yaml + documentIndex: 0 + set: + nodeSelector: + feeds: test + asserts: + - isSubset: + path: spec.template.spec.nodeSelector + content: + feeds: test + + - it: should render component affinity + template: deployment.yaml + documentIndex: 0 + set: + affinity: + feeds: test + asserts: + - isSubset: + path: spec.template.spec.affinity + content: + feeds: test + + - it: should render component tolerations + template: deployment.yaml + documentIndex: 0 + set: + tolerations: + - key: "feeds" + operator: "Equal" + value: "test" + effect: "NoSchedule" + asserts: + - contains: + path: spec.template.spec.tolerations + content: + key: "feeds" + operator: "Equal" + value: "test" + effect: "NoSchedule" + 
count: 1 + + - it: should render scratch volume as emptyDir by default + template: deployment.yaml + documentIndex: 0 + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: anchore-scratch + emptyDir: {} + count: 1 + + - it: should render scratch volume using details from values + template: deployment.yaml + documentIndex: 0 + set: + scratchVolume.details: + hostPath: + path: /tmp + type: Directory + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: anchore-scratch + hostPath: + path: /tmp + type: Directory + count: 1 + + - it: should render configmap volume + template: deployment.yaml + documentIndex: 0 + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: config-volume + configMap: + name: test-release-feeds + count: 1 + - contains: + path: spec.template.spec.volumes + content: + name: anchore-scratch + emptyDir: {} + count: 1 + - contains: + path: spec.template.spec.volumes + content: + name: anchore-license + secret: + secretName: anchore-enterprise-license + count: 1 + + - it: should render component container name + template: deployment.yaml + documentIndex: 0 + asserts: + - equal: + path: spec.template.spec.containers[0].name + value: feeds-feeds + + - it: should render component entrypoint args + template: deployment.yaml + documentIndex: 0 + asserts: + - matchRegex: + path: spec.template.spec.containers[0].args[0] + pattern: ^/docker-entrypoint\.sh anchore-enterprise-manager service start --no-auto-upgrade feeds$ + count: 1 + + - it: should render component environment variables + template: deployment.yaml + documentIndex: 0 + set: + extraEnv: + - name: feeds + value: test + - name: test + value: foobar + asserts: + - contains: + path: spec.template.spec.containers[0].env + content: + name: ANCHORE_PORT + value: "8448" + count: 1 + - contains: + path: spec.template.spec.containers[0].env + content: + name: feeds + value: test + count: 1 + - contains: + path: 
spec.template.spec.containers[0].env + content: + name: test + value: foobar + + - it: should render component ports + template: deployment.yaml + documentIndex: 0 + asserts: + - contains: + path: spec.template.spec.containers[0].ports + content: + name: feeds-api + containerPort: 8448 + count: 1 + + - it: should render component volumeMounts + template: deployment.yaml + documentIndex: 0 + asserts: + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: config-volume + mountPath: /config/config.yaml + subPath: config.yaml + count: 1 + any: true + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: anchore-scratch + mountPath: /anchore_scratch + count: 1 + any: true + + - it: should render component probes + template: deployment.yaml + documentIndex: 0 + asserts: + - isSubset: + path: spec.template.spec.containers[0].livenessProbe + content: + httpGet: + path: /health + port: feeds-api + scheme: HTTP + initialDelaySeconds: 120 + timeoutSeconds: 10 + periodSeconds: 10 + failureThreshold: 6 + successThreshold: 1 + count: 1 + - isSubset: + path: spec.template.spec.containers[0].readinessProbe + content: + httpGet: + path: /health + port: feeds-api + scheme: HTTP + timeoutSeconds: 10 + periodSeconds: 10 + failureThreshold: 3 + successThreshold: 1 + count: 1 + + - it: should render component resource requests & limits + template: deployment.yaml + documentIndex: 0 + set: + resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 200m + memory: 256Mi + asserts: + - isSubset: + path: spec.template.spec.containers[0].resources.requests + content: + cpu: 100m + memory: 128Mi + count: 1 + - isSubset: + path: spec.template.spec.containers[0].resources.limits + content: + cpu: 200m + memory: 256Mi + count: 1 + + + - it: should not render nodePorts + template: deployment.yaml + documentIndex: 1 + asserts: + - isNotSubset: + path: spec.ports[0] + content: + nodePort: 9999 + count: 1 + - it: should render 
nodePorts + template: deployment.yaml + documentIndex: 1 + set: + service.nodePort: 9999 + asserts: + - isSubset: + path: spec.ports[0] + content: + name: feeds-api + port: 8448 + protocol: TCP + targetPort: 8448 + nodePort: 9999 + count: 1 diff --git a/stable/feeds/tests/posthook_upgrade_resources_test.yaml b/stable/feeds/tests/posthook_upgrade_resources_test.yaml new file mode 100644 index 00000000..fef7578d --- /dev/null +++ b/stable/feeds/tests/posthook_upgrade_resources_test.yaml @@ -0,0 +1,178 @@ +suite: Posthook Upgrade Job Tests +templates: + - hooks/post-upgrade/upgrade_job.yaml + - secret.yaml +values: + - values.yaml +set: + feedsUpgradeJob.enabled: true + feedsUpgradeJob.usePostUpgradeHook: true +release: + name: test-release + namespace: test-namespace +chart: + version: 9.9.9 + appVersion: 9.9.9 + +tests: + - it: post-upgrade hook job gets created when feedsUpgradeJob.usePostUpgradeHook is true + template: hooks/post-upgrade/upgrade_job.yaml + asserts: + - containsDocument: + kind: Job + apiVersion: batch/v1 + name: test-release-feeds-999-upgrade + namespace: test-namespace + + - it: post-upgrade hook job does not get created when feedsUpgradeJob.usePostUpgradeHook is false + template: hooks/post-upgrade/upgrade_job.yaml + set: + feedsUpgradeJob: + usePostUpgradeHook: false + asserts: + - hasDocuments: + count: 0 + + - it: post-upgrade hook job does not get created when feedsUpgradeJob.enabled is false + template: hooks/post-upgrade/upgrade_job.yaml + set: + feedsUpgradeJob: + enabled: false + asserts: + - hasDocuments: + count: 0 + + - it: should render helm hook annotations + template: hooks/post-upgrade/upgrade_job.yaml + asserts: + - isSubset: + path: metadata.annotations + content: + "helm.sh/hook": post-upgrade + "helm.sh/hook-weight": "0" + + - it: should not render helm hook annotations when feedsUpgradeJob.force is true + template: hooks/post-upgrade/upgrade_job.yaml + set: + feedsUpgradeJob.force: true + asserts: + - isNotSubset: + path: 
metadata.annotations + content: + "helm.sh/hook": post-upgrade + "helm.sh/hook-weight": "0" + + - it: should render component serviceAccountName + template: hooks/post-upgrade/upgrade_job.yaml + set: + feedsUpgradeJob.serviceAccountName: feedsUpgradeJob-test + asserts: + - equal: + path: spec.template.spec.serviceAccountName + value: feedsUpgradeJob-test + + - it: should render component serviceAccountName even when global serviceAccountName is set + template: hooks/post-upgrade/upgrade_job.yaml + set: + serviceAccountName: global-test + feedsUpgradeJob.serviceAccountName: feedsUpgradeJob-test + asserts: + - equal: + path: spec.template.spec.serviceAccountName + value: feedsUpgradeJob-test + + - it: should render component nodeSelector + template: hooks/post-upgrade/upgrade_job.yaml + set: + feedsUpgradeJob.nodeSelector: + feedsUpgradeJob: test + asserts: + - isSubset: + path: spec.template.spec.nodeSelector + content: + feedsUpgradeJob: test + + - it: should render component affinity + template: hooks/post-upgrade/upgrade_job.yaml + set: + feedsUpgradeJob.affinity: + feedsUpgradeJob: test + asserts: + - isSubset: + path: spec.template.spec.affinity + content: + feedsUpgradeJob: test + + - it: should render component tolerations + template: hooks/post-upgrade/upgrade_job.yaml + set: + feedsUpgradeJob.tolerations: + - key: "feedsUpgradeJob" + operator: "Equal" + value: "test" + effect: "NoSchedule" + asserts: + - contains: + path: spec.template.spec.tolerations + content: + key: "feedsUpgradeJob" + operator: "Equal" + value: "test" + effect: "NoSchedule" + count: 1 + + - it: should render proper database endpoint on entrypoint + template: hooks/post-upgrade/upgrade_job.yaml + asserts: + - matchRegex: + path: spec.template.spec.containers[0].args[0] + pattern: anchore-enterprise-manager db --db-connect 
postgresql:\/\/\"\$\{ANCHORE_FEEDS_DB_USER\}\":\"\$\{ANCHORE_FEEDS_DB_PASSWORD\}\"@\"\$\{ANCHORE_FEEDS_DB_HOST\}\":\"\$\{ANCHORE_FEEDS_DB_PORT\}\"\/\"\$\{ANCHORE_FEEDS_DB_NAME\}\" upgrade --dontask; + count: 1 + + - it: should render proper database endpoint on entrypoint when ssl is enabled and mode is require + template: hooks/post-upgrade/upgrade_job.yaml + set: + anchoreConfig.database.ssl: true + anchoreConfig.database.sslMode: require + asserts: + - matchRegex: + path: spec.template.spec.containers[0].args[0] + pattern: anchore-enterprise-manager db --db-use-ssl --db-connect postgresql:\/\/\"\$\{ANCHORE_FEEDS_DB_USER\}\":\"\$\{ANCHORE_FEEDS_DB_PASSWORD\}\"@\"\$\{ANCHORE_FEEDS_DB_HOST\}\":\"\$\{ANCHORE_FEEDS_DB_PORT\}\"\/\"\$\{ANCHORE_FEEDS_DB_NAME\}\"\?sslmode\=require upgrade --dontask; + count: 1 + + - it: should render proper database endpoint on entrypoint when ssl is enabled and mode is verify-full + template: hooks/post-upgrade/upgrade_job.yaml + set: + anchoreConfig.database.ssl: true + anchoreConfig.database.sslMode: verify-full + anchoreConfig.database.sslRootCertFileName: test-root.crt + asserts: + - matchRegex: + path: spec.template.spec.containers[0].args[0] + pattern: anchore-enterprise-manager db --db-use-ssl --db-connect postgresql:\/\/\"\$\{ANCHORE_FEEDS_DB_USER\}\":\"\$\{ANCHORE_FEEDS_DB_PASSWORD\}\"@\"\$\{ANCHORE_FEEDS_DB_HOST\}\":\"\$\{ANCHORE_FEEDS_DB_PORT\}\"\/\"\$\{ANCHORE_FEEDS_DB_NAME\}\"\?sslmode\=verify-full\\&sslrootcert\=\/home\/anchore\/certs\/test-root\.crt upgrade --dontask; + count: 1 + + - it: should render component resource requests & limits + template: hooks/post-upgrade/upgrade_job.yaml + documentIndex: 0 + set: + feedsUpgradeJob.resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 200m + memory: 256Mi + asserts: + - isSubset: + path: spec.template.spec.containers[0].resources.requests + content: + cpu: 100m + memory: 128Mi + count: 1 + - isSubset: + path: spec.template.spec.containers[0].resources.limits + 
content: + cpu: 200m + memory: 256Mi + count: 1 diff --git a/stable/feeds/tests/prehook_upgrade_resources_test.yaml b/stable/feeds/tests/prehook_upgrade_resources_test.yaml new file mode 100644 index 00000000..b325aab2 --- /dev/null +++ b/stable/feeds/tests/prehook_upgrade_resources_test.yaml @@ -0,0 +1,253 @@ +suite: PreHook Upgrade Job Tests +templates: + - templates/hooks/pre-upgrade/upgrade_job.yaml + - templates/hooks/pre-upgrade/upgrade_rbac.yaml + - anchore_secret.yaml +values: + - values.yaml +release: + name: test-release + namespace: test-namespace +chart: + version: 9.9.9 + appVersion: 9.9.9 + +upgrade-resource: &upgrade-resources + - templates/hooks/pre-upgrade/upgrade_job.yaml + - templates/hooks/pre-upgrade/upgrade_rbac.yaml + +tests: + - it: rbac should match snapshot + template: templates/hooks/pre-upgrade/upgrade_rbac.yaml + asserts: + - matchSnapshot: {} + + - it: should render helm hook annotations on rbac + template: templates/hooks/pre-upgrade/upgrade_rbac.yaml + asserts: + - isSubset: + path: metadata.annotations + content: + "helm.sh/hook": pre-upgrade + "helm.sh/hook-weight": "1" + + - it: should not render helm hook annotations on rbac when feedsUpgradeJob.force is true + template: templates/hooks/pre-upgrade/upgrade_rbac.yaml + set: + feedsUpgradeJob.force: true + asserts: + - isNotSubset: + path: metadata.annotations + content: + "helm.sh/hook": pre-upgrade + "helm.sh/hook-weight": "1" + + - it: pre-hook rbac gets created by default + template: templates/hooks/pre-upgrade/upgrade_rbac.yaml + asserts: + - containsDocument: + kind: ServiceAccount + apiVersion: v1 + name: test-release-feeds-upgrade-sa + namespace: test-namespace + documentIndex: 0 + - containsDocument: + kind: RoleBinding + apiVersion: rbac.authorization.k8s.io/v1 + name: test-release-feeds-upgrade-role-binding + namespace: test-namespace + documentIndex: 1 + - containsDocument: + kind: Role + apiVersion: rbac.authorization.k8s.io/v1 + name: test-release-feeds-upgrade-role + 
namespace: test-namespace + documentIndex: 2 + + - it: pre-hook job does not get created when post-upgrade hook is enabled + templates: *upgrade-resources + set: + feedsUpgradeJob: + usePostUpgradeHook: true + asserts: + - hasDocuments: + count: 0 + + - it: pre-hook job does not get created when feedsUpgradeJob.enabled is false + templates: *upgrade-resources + set: + feedsUpgradeJob: + enabled: false + asserts: + - hasDocuments: + count: 0 + + - it: pre-hook job gets created by default + template: templates/hooks/pre-upgrade/upgrade_job.yaml + asserts: + - containsDocument: + kind: Job + apiVersion: batch/v1 + name: test-release-feeds-999-upgrade + namespace: test-namespace + + - it: should render helm hook annotations + template: templates/hooks/pre-upgrade/upgrade_job.yaml + asserts: + - isSubset: + path: metadata.annotations + content: + "helm.sh/hook": pre-upgrade + "helm.sh/hook-weight": "3" + + - it: should not render helm hook annotations when feedsUpgradeJob.force is true + template: templates/hooks/pre-upgrade/upgrade_job.yaml + set: + feedsUpgradeJob.force: true + asserts: + - isNotSubset: + path: metadata.annotations + content: + "helm.sh/hook": pre-upgrade + "helm.sh/hook-weight": "3" + + - it: should render component serviceAccountName + template: templates/hooks/pre-upgrade/upgrade_job.yaml + set: + feedsUpgradeJob.serviceAccountName: feedsUpgradeJob-test + asserts: + - equal: + path: spec.template.spec.serviceAccountName + value: feedsUpgradeJob-test + + - it: should render service account name when global serviceAccountName is set and feedsUpgradeJob.rbacCreate is true + template: templates/hooks/pre-upgrade/upgrade_job.yaml + set: + serviceAccountName: feedsUpgradeJob-global-test + feedsUpgradeJob.rbacCreate: true + asserts: + - equal: + path: spec.template.spec.serviceAccountName + value: test-release-feeds-upgrade-sa + + - it: should render global serviceAccountName when feedsUpgradeJob.rbacCreate is false + template: 
templates/hooks/pre-upgrade/upgrade_job.yaml + set: + serviceAccountName: feedsUpgradeJob-global-test + feedsUpgradeJob.rbacCreate: false + asserts: + - equal: + path: spec.template.spec.serviceAccountName + value: feedsUpgradeJob-global-test + + - it: should render component serviceAccountName even when global serviceAccountName is set + template: templates/hooks/pre-upgrade/upgrade_job.yaml + set: + serviceAccountName: global-test + feedsUpgradeJob.serviceAccountName: feedsUpgradeJob-test + asserts: + - equal: + path: spec.template.spec.serviceAccountName + value: feedsUpgradeJob-test + + - it: should render component nodeSelector + template: templates/hooks/pre-upgrade/upgrade_job.yaml + set: + feedsUpgradeJob.nodeSelector: + feedsUpgradeJob: test + asserts: + - isSubset: + path: spec.template.spec.nodeSelector + content: + feedsUpgradeJob: test + + - it: should render component affinity + template: templates/hooks/pre-upgrade/upgrade_job.yaml + set: + feedsUpgradeJob.affinity: + feedsUpgradeJob: test + asserts: + - isSubset: + path: spec.template.spec.affinity + content: + feedsUpgradeJob: test + + - it: should render component tolerations + template: templates/hooks/pre-upgrade/upgrade_job.yaml + set: + feedsUpgradeJob.tolerations: + - key: "feedsUpgradeJob" + operator: "Equal" + value: "test" + effect: "NoSchedule" + asserts: + - contains: + path: spec.template.spec.tolerations + content: + key: "feedsUpgradeJob" + operator: "Equal" + value: "test" + effect: "NoSchedule" + count: 1 + + - it: should render proper initContainers + template: templates/hooks/pre-upgrade/upgrade_job.yaml + asserts: + - matchSnapshot: + path: spec.template.spec.initContainers + + - it: should render proper database endpoint on entrypoint + template: templates/hooks/pre-upgrade/upgrade_job.yaml + asserts: + - matchRegex: + path: spec.template.spec.containers[0].args[0] + pattern: anchore-enterprise-manager db --db-connect 
postgresql:\/\/\"\$\{ANCHORE_FEEDS_DB_USER\}\":\"\$\{ANCHORE_FEEDS_DB_PASSWORD\}\"@\"\$\{ANCHORE_FEEDS_DB_HOST\}\":\"\$\{ANCHORE_FEEDS_DB_PORT\}\"\/\"\$\{ANCHORE_FEEDS_DB_NAME\}\" upgrade --dontask; + count: 1 + + - it: should render proper database endpoint on entrypoint when ssl is enabled and mode is require + template: templates/hooks/pre-upgrade/upgrade_job.yaml + set: + anchoreConfig.database.ssl: true + anchoreConfig.database.sslMode: require + asserts: + - matchRegex: + path: spec.template.spec.containers[0].args[0] + pattern: anchore-enterprise-manager db --db-use-ssl --db-connect postgresql:\/\/\"\$\{ANCHORE_FEEDS_DB_USER\}\":\"\$\{ANCHORE_FEEDS_DB_PASSWORD\}\"@\"\$\{ANCHORE_FEEDS_DB_HOST\}\":\"\$\{ANCHORE_FEEDS_DB_PORT\}\"\/\"\$\{ANCHORE_FEEDS_DB_NAME\}\"\?sslmode\=require upgrade --dontask; + count: 1 + + - it: should render proper database endpoint on entrypoint when ssl is enabled and mode is verify-full + template: templates/hooks/pre-upgrade/upgrade_job.yaml + set: + anchoreConfig.database.ssl: true + anchoreConfig.database.sslMode: verify-full + anchoreConfig.database.sslRootCertFileName: test-root.crt + asserts: + - matchRegex: + path: spec.template.spec.containers[0].args[0] + pattern: anchore-enterprise-manager db --db-use-ssl --db-connect postgresql:\/\/\"\$\{ANCHORE_FEEDS_DB_USER\}\":\"\$\{ANCHORE_FEEDS_DB_PASSWORD\}\"@\"\$\{ANCHORE_FEEDS_DB_HOST\}\":\"\$\{ANCHORE_FEEDS_DB_PORT\}\"\/\"\$\{ANCHORE_FEEDS_DB_NAME\}\"\?sslmode\=verify-full\\&sslrootcert\=\/home\/anchore\/certs\/test-root\.crt upgrade --dontask; + count: 1 + + - it: should render component resource requests & limits + template: templates/hooks/pre-upgrade/upgrade_job.yaml + documentIndex: 0 + set: + feedsUpgradeJob.resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 200m + memory: 256Mi + asserts: + - isSubset: + path: spec.template.spec.containers[0].resources.requests + content: + cpu: 100m + memory: 128Mi + count: 1 + - isSubset: + path: 
spec.template.spec.containers[0].resources.limits + content: + cpu: 200m + memory: 256Mi + count: 1 diff --git a/stable/feeds/tests/secret_test.yaml b/stable/feeds/tests/secret_test.yaml new file mode 100644 index 00000000..d0684bf8 --- /dev/null +++ b/stable/feeds/tests/secret_test.yaml @@ -0,0 +1,26 @@ +suite: Secret Tests +templates: + - secret.yaml +release: + name: test-release + namespace: test-namespace +chart: + version: 9.9.9 + appVersion: 9.9.9 +tests: + - it: should not render github driver token in the secret when github driver is not enabled + template: secret.yaml + values: + - values.yaml + asserts: + - notExists: + path: stringData.ANCHORE_GITHUB_TOKEN + - it: should render github driver token in the secret correctly when driver is enabled & token is set + template: secret.yaml + set: + anchoreConfig.feeds.drivers.github.token: foobar + anchoreConfig.feeds.drivers.github.enabled: true + asserts: + - equal: + path: stringData.ANCHORE_GITHUB_TOKEN + value: "foobar" diff --git a/stable/feeds/tests/values.yaml b/stable/feeds/tests/values.yaml new file mode 100644 index 00000000..9d58f1ee --- /dev/null +++ b/stable/feeds/tests/values.yaml @@ -0,0 +1,13 @@ +annotations: + foo: "bar-annotation" + bar: "baz-annotation" + +labels: + foo: "bar-label" + bar: "baz-label" + +extraEnv: + - name: foo + value: bar + - name: bar + value: baz diff --git a/stable/feeds/values.yaml b/stable/feeds/values.yaml new file mode 100644 index 00000000..d8e126a4 --- /dev/null +++ b/stable/feeds/values.yaml @@ -0,0 +1,611 @@ +################################################### +## @section Common Resource Parameters +## Common params used by all Anchore Feeds k8s resources +################################################### + +## @param standalone Enable running the Anchore Feeds service in standalone mode +## +standalone: true + +## @param url Set a custom feeds URL. Useful when using a feeds service endpoint that is external from the cluster. +## i.e. 
https://: +url: "" + +## @param fullnameOverride overrides the fullname set on resources +## +fullnameOverride: "" + +## @param nameOverride overrides the name set on resources +## +nameOverride: "" + +## @param image Image used for feeds deployment +## +image: docker.io/anchore/enterprise:v5.4.0 + +## @param imagePullPolicy Image pull policy used by all deployments +## ref: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy +## +imagePullPolicy: IfNotPresent + +## @param imagePullSecretName Name of Docker credentials secret for access to private repos +## Pre-populated with the pull secret name specified in the Anchore docs & quickstart instructions +## Secrets must be manually created in the same namespace as release +## +imagePullSecretName: anchore-enterprise-pullcreds + +## @param serviceAccountName Name of a service account used to run all Feeds pods +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccountName: "" + +## @param injectSecretsViaEnv Enable secret injection into pod via environment variables instead of via k8s secrets +## Useful for injecting secrets directly into k8s pods from Hashicorp vault +## +injectSecretsViaEnv: false + +## @param licenseSecretName Name of the Kubernetes secret containing your license.yaml file +## This must be manually created. 
For example with the following command:
+## `kubectl create secret generic anchore-enterprise-license --from-file=license.yaml=<path/to/license.yaml>`
+##
+licenseSecretName: anchore-enterprise-license
+
+## @param certStoreSecretName Name of secret containing the certificates & keys used for SSL, SAML & CAs
+## This secret will be mounted in pods to /home/anchore/certs
+## Secret must be manually created in the same namespace as release
+##
+certStoreSecretName: ""
+
+## @param extraEnv Common environment variables set on all containers
+## ref: https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/
+##
+extraEnv: []
+
+## @param labels Common labels set on all Kubernetes resources
+## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+##
+labels: {}
+
+## @param annotations Common annotations set on all Kubernetes resources
+## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+##
+annotations: {}
+
+## @param resources Resource requests and limits for Anchore Feeds pods
+##
+resources: {}
+
+## @param nodeSelector Node labels for Anchore Feeds pod assignment
+##
+nodeSelector: {}
+
+## @param tolerations Tolerations for Anchore Feeds pod assignment
+##
+tolerations: []
+
+## @param affinity Affinity for Anchore Feeds pod assignment
+##
+affinity: {}
+
+## @param service.type Service type for Anchore Feeds
+## @param service.port Service port for Anchore Feeds
+## @param service.annotations Annotations for Anchore Feeds service
+## @param service.labels Labels for Anchore Feeds service
+## @param service.nodePort nodePort for Anchore Feeds service
+##
+service:
+  type: ClusterIP
+  port: 8448
+  annotations: {}
+  labels: {}
+  nodePort: ""
+
+## @param scratchVolume.mountPath The mount path of an external volume for scratch space for image analysis
+## @param scratchVolume.fixGroupPermissions Enable an initContainer that will fix the fsGroup permissions
+## @param 
scratchVolume.fixerInitContainerImage Set the container image for the permissions fixer init container +## @param scratchVolume.details [object] Details for the k8s volume to be created +## Generally speaking you need to provision 3x the size of the largest image (uncompressed) that you want to analyze +## ref: https://kubernetes.io/docs/concepts/storage/volumes/ +## +scratchVolume: + mountPath: /anchore_scratch + fixGroupPermissions: false + fixerInitContainerImage: alpine + details: {} + +## @param persistence.enabled Enable mounting an external volume for feeds driver workspace +## @param persistence.fixGroupPermissions Enable an initContainer that will fix the fsGroup permissions +## @param persistence.resourcePolicy Resource policy Helm annotation on PVC. Can be nil or "keep" +## @param persistence.existingClaim Specify an existing volume claim +## @param persistence.storageClass Persistent volume storage class +## If persistence.storageClass="" (the default), no storageClassName spec is set, choosing the default provisioner (gp2 on AWS, standard on GKE, AWS & OpenStack) +## @param persistence.accessMode Access Mode for persistent volume +## @param persistence.size Size of persistent volume +## @param persistence.mountPath Mount path on Anchore Feeds container for persistent volume +## @param persistence.subPath Directory name used for persistent volume storage +## @param persistence.annotations Annotations for PVC +## +persistence: + enabled: true + fixGroupPermissions: false + resourcePolicy: keep + existingClaim: "" + storageClass: "" + accessMode: ReadWriteOnce + size: 40Gi + mountPath: /workspace + subPath: "feeds-workspace" + annotations: {} + +## @param extraVolumes mounts additional volumes to each pod +## ref: https://kubernetes.io/docs/concepts/storage/volumes/ +## +extraVolumes: [] +# - name: config +# secret: +# secretName: config + +## @param extraVolumeMounts mounts additional volumes to each pod +## ref: 
https://kubernetes.io/docs/concepts/storage/volumes/ +## +extraVolumeMounts: [] +# - name: config +# mountPath: "/vault/secrets/config" +# subPath: config +# readOnly: true + +## @param securityContext.runAsUser The securityContext runAsUser for all Feeds pods +## @param securityContext.runAsGroup The securityContext runAsGroup for all Feeds pods +## @param securityContext.fsGroup The securityContext fsGroup for all Feeds pods +## By default the Feeds images utilize the user/group 'anchore' using uid/gid 1000 +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod +## +securityContext: + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + +## @param containerSecurityContext The securityContext for all Feeds containers +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container +## +containerSecurityContext: {} + +## @param probes.liveness.initialDelaySeconds Initial delay seconds for liveness probe +## @param probes.liveness.timeoutSeconds Timeout seconds for liveness probe +## @param probes.liveness.periodSeconds Period seconds for liveness probe +## @param probes.liveness.failureThreshold Failure threshold for liveness probe +## @param probes.liveness.successThreshold Success threshold for liveness probe +## @param probes.readiness.timeoutSeconds Timeout seconds for the readiness probe +## @param probes.readiness.periodSeconds Period seconds for the readiness probe +## @param probes.readiness.failureThreshold Failure threshold for the readiness probe +## @param probes.readiness.successThreshold Success threshold for the readiness probe +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes +## +probes: + liveness: + initialDelaySeconds: 120 + timeoutSeconds: 10 + periodSeconds: 10 + failureThreshold: 6 + successThreshold: 1 + readiness: + timeoutSeconds: 10 + 
periodSeconds: 10 + failureThreshold: 3 + successThreshold: 1 + +## @param doSourceAtEntry.enabled Does a `source` of the file paths defined before starting Anchore services +## @param doSourceAtEntry.filePaths List of file paths to `source` before starting Anchore services +## For example, if using hashicorp vault, set to /vault/secrets/config +## +doSourceAtEntry: + enabled: false + filePaths: [] + # - "/vault/secrets/config" + +## @param useExistingSecrets forgoes secret creation and uses the secret defined in existingSecretName +## When useExistingSecrets is set to `true` the chart will not create secrets specifying the environment variables used in deployments. +## Instead, the chart will use secrets that have already been applied to the namespace that this chart is being deployed to. +## +useExistingSecrets: false + +## @param existingSecretName Name of the existing secret to be used for Anchore Feeds Service +## +existingSecretName: anchore-enterprise-feeds-env + +## @param configOverride Allows for overriding the default Anchore configuration file +## This value can be used to pass in a custom configuration file for Anchore services +## This config file will be templated when it is added to the configmap, so Helm values and functions can be used +## +configOverride: {} + +## @param scripts [object] Collection of helper scripts usable in all anchore enterprise pods +## +scripts: + anchore-config: | + #!/bin/bash + while IFS= read -r line; do + while [[ "$line" =~ (\$\{[a-zA-Z_][a-zA-Z_0-9]*\}) ]]; do + VAR_NAME=${BASH_REMATCH[1]#*\{}; VAR_NAME=${VAR_NAME%\}}; + line=${line//${BASH_REMATCH[1]}/${!VAR_NAME}}; + done; + printf '%s\n' "$line"; + done < /config/config.yaml + +##################################################################### +## @section Anchore Feeds Configuration Parameters +## Params used for all Anchore Feeds service configuration files +##################################################################### + +anchoreConfig: + ## @param 
anchoreConfig.service_dir Path to directory where default Anchore configs are placed at startup + ## This path must be a writable location for the pod + ## + service_dir: /anchore_service + + ## @param anchoreConfig.log_level The log level for Anchore services + ## options available: FATAL, ERROR, WARN, INFO, DEBUG, SPEW + ## + log_level: INFO + + ## @param anchoreConfig.keys.secret The shared secret used for signing & encryption, auto-generated by Helm if not set + ## @param anchoreConfig.keys.privateKeyFileName The file name of the private key used for signing & encryption, found in the k8s secret specified in .Values.certStoreSecretName + ## @param anchoreConfig.keys.publicKeyFileName The file name of the public key used for signing & encryption, found in the k8s secret specified in .Values.certStoreSecretName + ## Only one of anchoreConfig.keys.secret or anchoreConfig.keys.privateKeyFileName/anchoreConfig.keys.publicKeyFileName should be configured. + ## If all are set then the keys take precedence over the secret value. 
+ ## + keys: + secret: "" + privateKeyFileName: "" + publicKeyFileName: "" + + ## @param anchoreConfig.user_authentication.oauth.enabled Enable OAuth for Anchore user authentication + ## @param anchoreConfig.user_authentication.oauth.default_token_expiration_seconds The expiration, in seconds, for OAuth tokens + ## @param anchoreConfig.user_authentication.oauth.refresh_token_expiration_seconds The expiration, in seconds, for OAuth refresh tokens + ## ref: https://docs.anchore.com/current/docs/installation/configuration/user_credential_storage/#bearer-tokensoauth2 + ## + ## @param anchoreConfig.user_authentication.hashed_passwords Enable storing passwords as secure hashes in the database + ## This can dramatically increase CPU usage if you don't also use OAuth and tokens for internal communications + ## WARNING: You should not change this after a system has been initialized as it may cause a mismatch in existing passwords + ## ref: https://docs.anchore.com/current/docs/installation/configuration/user_credential_storage/#configuring-hashed-passwords-and-oauth + ## + ## @param anchoreConfig.user_authentication.sso_require_existing_users set to true in order to disable the SSO JIT provisioning during authentication + ## This provides an additional layer of security and configuration for SSO users to gain access to Anchore. 
+ ## + user_authentication: + oauth: + enabled: false + default_token_expiration_seconds: 3600 + refresh_token_expiration_seconds: 86400 + hashed_passwords: false + sso_require_existing_users: false + + ## @param anchoreConfig.metrics.enabled Enable Prometheus metrics for all Anchore services + ## @param anchoreConfig.metrics.auth_disabled Disable auth on Prometheus metrics for all Anchore services + ## + metrics: + enabled: false + auth_disabled: false + + ## @param anchoreConfig.database.timeout + ## @param anchoreConfig.database.ssl Enable SSL/TLS for the database connection + ## @param anchoreConfig.database.sslMode The SSL mode to use for database connection + ## @param anchoreConfig.database.sslRootCertFileName File name of the database root CA certificate stored in the k8s secret specified with .Values.certStoreSecretName + ## @param anchoreConfig.database.db_pool_size The database max connection pool size + ## @param anchoreConfig.database.db_pool_max_overflow The maximum overflow size of the database connection pool + ## @param anchoreConfig.database.engineArgs Set custom database engine arguments for SQLAlchemy + ## ref: https://docs.sqlalchemy.org/en/14/core/engines.html#engine-creation-api + ## + database: + timeout: 120 + ssl: false + sslMode: require + sslRootCertFileName: "" + db_pool_size: 30 + db_pool_max_overflow: 100 + engineArgs: {} + + ## @param anchoreConfig.internalServicesSSL.enabled Force all Enterprise services to use SSL for internal communication + ## @param anchoreConfig.internalServicesSSL.verifyCerts Enable cert verification against the local cert bundle, if this set to false self-signed certs are allowed + ## @param anchoreConfig.internalServicesSSL.certSecretKeyFileName File name of the private key used for internal SSL stored in the secret specified in .Values.certStoreSecretName + ## @param anchoreConfig.internalServicesSSL.certSecretCertFileName File name of the root CA certificate used for internal SSL stored in the secret 
specified in .Values.certStoreSecretName + ## ref: https://docs.anchore.com/current/docs/installation/configuration/tls_ssl_config/ + ## + internalServicesSSL: + enabled: false + verifyCerts: false + certSecretKeyFileName: "" + certSecretCertFileName: "" + + feeds: + ## @param anchoreConfig.feeds.cycle_timers.driver_sync Time delay in seconds between consecutive driver runs for processing data + ## + cycle_timers: + driver_sync: 7200 + + drivers: + ## @param anchoreConfig.feeds.drivers.debian.releases Additional Debian feeds groups + ## + debian: + releases: {} + + ## @param anchoreConfig.feeds.drivers.ubuntu.releases Additional Ubuntu feed groups + ## + ubuntu: + releases: {} + + ## @param anchoreConfig.feeds.drivers.npm.enabled Enable vulnerability drivers for npm data + ## + npm: + enabled: false + + ## @param anchoreConfig.feeds.drivers.gem.enabled Enable vulnerability drivers for gem data + ## @param anchoreConfig.feeds.drivers.gem.db_connect Defines the database endpoint used for loading the rubygems package data as a PostgreSQL dump + ## + gem: + enabled: false + db_connect: "postgresql://${ANCHORE_GEM_DB_USER}:${ANCHORE_GEM_DB_PASSWORD}@${ANCHORE_GEM_DB_HOST}:${ANCHORE_GEM_DB_PORT}/${ANCHORE_GEM_DB_NAME}" + + ## @param anchoreConfig.feeds.drivers.nvdv2.api_key The NVD API key value + ## Request one from https://nvd.nist.gov/developers/request-an-api-key + ## + nvdv2: + api_key: "" + + ## @param anchoreConfig.feeds.drivers.msrc.enabled Enable Microsoft feeds + ## @param anchoreConfig.feeds.drivers.msrc.whitelist MSRC product IDs for generating feed data, this extends the pre-defined list of product IDs + ## ref: https://docs.anchore.com/current/docs/installation/feeds/#driver-configuration + ## + msrc: + enabled: false + whitelist: [] + + ## @param anchoreConfig.feeds.drivers.github.enabled Enable GitHub advisory feeds (requires GitHub PAT) + ## ref: https://docs.anchore.com/current/docs/installation/feeds/#driver-configuration + ## @param 
anchoreConfig.feeds.drivers.github.token GitHub developer personal access token with zero permission scopes + ## ref: https://github.com/settings/tokens/new + ## + github: + enabled: false + token: "" + +############################################# +## @section Anchore Feeds Database Parameters +############################################# + +feeds-db: + ## @param feeds-db.chartEnabled Use the dependent chart for Feeds Postgresql deployment + ## + chartEnabled: true + + ## @param feeds-db.externalEndpoint External Feeds Postgresql hostname when not using Helm managed chart (eg. mypostgres.myserver.io) + ## feeds-db.externalEndpoint, feeds-db.auth.username, feeds-db.auth.password, + ## feeds-db.auth.database, & feeds-db.postgresqlPort are required values for external Postgres + ## + externalEndpoint: "" + + ## @param feeds-db.auth.username Username used to connect to Postgresql + ## @param feeds-db.auth.password Password used to connect to Postgresql + ## @param feeds-db.auth.database Database name used when connecting to Postgresql + ## + auth: + username: anchore-feeds + password: anchore-postgres,123 + database: anchore-feeds + + primary: + ## @param feeds-db.primary.service.ports.postgresql Port used to connect to Postgresql + ## + service: + ports: + postgresql: 5432 + + ## @param feeds-db.primary.persistence.size Configure size of the persistent volume used with helm managed chart + ## + persistence: + size: 20Gi + + ## @param feeds-db.primary.extraEnvVars An array to add extra environment variables + ## + extraEnvVars: [] + + ## @param feeds-db.image.tag Specifies the image to use for this chart. 
+ ## + image: + tag: 13.11.0-debian-11-r15 + +####################################### +## @section Feeds Gem Database Parameters +####################################### + +gem-db: + ## @extra gem-db.chartEnabled Use the dependent chart for Postgresql deployment + ## Configures a separate postgres database deployment for the feeds service Ruby Gems DB + ## requires feeds.gemDriverEnabled=true + ## + ## chartEnabled: false + + ## @param gem-db.externalEndpoint External Postgresql hostname when not using Helm managed chart (eg. mypostgres.myserver.io) + ## gem-db.externalEndpoint, gem-db.postgresqlUsername, gem-db.postgresqlPassword, + ## gem-db.postgresqlDatabase, & gem-db.postgresqlPort are required values for external Postgres + ## + externalEndpoint: "" + + ## @param gem-db.auth.username Username used to connect to Postgresql + ## @param gem-db.auth.password Password used to connect to Postgresql + ## @param gem-db.auth.database Database name used when connecting to Postgresql + ## + auth: + username: anchore-gem-feeds + password: anchore-postgres,123 + database: anchore-gem-feeds + + primary: + ## @param gem-db.primary.service.ports.postgresql Port used to connect to Postgresql + ## + service: + ports: + postgresql: 5432 + + ## @param gem-db.primary.persistence.size Configure size of the persistent volume used with helm managed chart + ## + persistence: + size: 20Gi + + ## @param gem-db.primary.extraEnvVars An array to add extra environment variables + ## + extraEnvVars: [] + + ## @param gem-db.image.tag Specifies the image to use for this chart. 
+ ## + image: + tag: 13.11.0-debian-11-r15 + +################################################ +## @section Anchore Feeds Upgrade Job Parameters +## Upgrade job uses a Helm post-install-hook +################################################ +feedsUpgradeJob: + ## @param feedsUpgradeJob.enabled Enable the Anchore Feeds database upgrade job + ## + enabled: true + + ## @param feedsUpgradeJob.force Force the Anchore Feeds database upgrade job to run as a regular job instead of as a Helm hook + ## + force: false + + ## @param feedsUpgradeJob.rbacCreate Create RBAC resources for the upgrade job + ## By default, the anchore upgrade job utilizes a service account that will be created to call kubectl to scale down the deployment before running the upgrade job. + ## The service account is granted deployment, deployment/scale, and pod permissions. See templates/hooks/pre-upgrade/db-upgrade-rbac.yaml for the full list of permissions + ## + rbacCreate: true + + ## @param feedsUpgradeJob.serviceAccountName Use an existing service account for the upgrade job + ## + serviceAccountName: "" + + ## @param feedsUpgradeJob.usePostUpgradeHook Use a Helm post-upgrade hook to run the upgrade job instead of the default pre-upgrade hook. This job does not require creating RBAC resources. + ## Uses the same mechanism for upgrades as the legacy anchore-engine chart. Not compatible with `helm upgrade --wait` or ArgoCD. + ## + usePostUpgradeHook: false + + + ## @param feedsUpgradeJob.kubectlImage The image to use for the upgrade job's init container that uses kubectl to scale down deployments before an upgrade + ## This is only used in the preupgrade job. 
+ ## + kubectlImage: bitnami/kubectl:1.27 + + ## @param feedsUpgradeJob.nodeSelector Node labels for the Anchore Feeds upgrade job pod assignment + ## + nodeSelector: {} + + ## @param feedsUpgradeJob.tolerations Tolerations for the Anchore Feeds upgrade job pod assignment + ## + tolerations: [] + + ## @param feedsUpgradeJob.affinity Affinity for the Anchore Feeds upgrade job pod assignment + ## + affinity: {} + + ## @param feedsUpgradeJob.annotations Annotations for the Anchore Feeds upgrade job + ## + annotations: {} + + ## @param feedsUpgradeJob.labels Labels for the Anchore Feeds upgrade job + ## + labels: {} + + ## @param feedsUpgradeJob.resources Resources for the Anchore Feeds upgrade job + ## + resources: {} + + ## @param feedsUpgradeJob.ttlSecondsAfterFinished The time period in seconds the upgrade job, and it's related pods should be retained for + ## Defaults to 0 == immediate deletion after completion + ## Set this to -1 to disable deleting the job automatically (NOTE: This can cause issues with upgrades) + ## + ttlSecondsAfterFinished: -1 + +############################## +## @section Ingress Parameters +############################## +ingress: + ## @param ingress.enabled Create an ingress resource for external Anchore service APIs + ## ref: https://kubernetes.io/docs/user-guide/ingress/ + ## + enabled: false + + ## @param ingress.labels Labels for the ingress resource + ## + labels: {} + + ## @param ingress.annotations [object] Annotations for the ingress resource + ## By default this chart is setup to use an NGINX ingress controller, which needs to be installed & configured on your cluster + ## ref: https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.2/guide/ingress/annotations/ + ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md + ## ref: https://cloud.google.com/kubernetes-engine/docs/how-to/ingress-features + ## + annotations: {} + + ## @param ingress.hosts List of 
custom hostnames for the Anchore Feeds API + ## + hosts: [] + + ## @param ingress.paths The path used for accessing the Anchore Feeds API + ## + paths: + - /v2/feeds/ + + ## @param ingress.tls Configure tls for the ingress resource + ## Secrets must be manually created in the release namespace + ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls + ## + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + + ## @param ingress.ingressClassName sets the ingress class name. As of k8s v1.18, this should be nginx + ## ref: # Reference: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/ + ## + ingressClassName: nginx + +######################################### +## @section Google CloudSQL DB Parameters +######################################### +cloudsql: + ## @param cloudsql.enabled Use CloudSQL proxy container for GCP database access + ## + enabled: false + + ## @param cloudsql.image Image to use for GCE CloudSQL Proxy + ## + image: gcr.io/cloudsql-docker/gce-proxy:1.25.0 + + ## @param cloudsql.imagePullPolicy Image Pull Policy to use for CloudSQL image + ## + imagePullPolicy: IfNotPresent + + ## @param cloudsql.instance CloudSQL instance, eg: 'project:zone:instancename' + ## + instance: "" + + ## @param cloudsql.useExistingServiceAcc Use existing service account + ## If using an existing Service Account, you must create a secret which includes the JSON token from Google's IAM + ## ref: https://cloud.google.com/sql/docs/postgres/authentication + ## + useExistingServiceAcc: false + + ## @param cloudsql.serviceAccSecretName + ## + serviceAccSecretName: "" + + ## @param cloudsql.serviceAccJsonName + ## + serviceAccJsonName: "" + + ## @param cloudsql.extraArgs a list of extra arguments to be passed into the cloudsql container command. 
eg + ## extraArgs: + ## - "-ip_address_types=PRIVATE" + ## - "-enable_iam_login" + ## + extraArgs: [] diff --git a/stable/k8s-inventory/.helmignore b/stable/k8s-inventory/.helmignore new file mode 100644 index 00000000..0e8a0eb3 --- /dev/null +++ b/stable/k8s-inventory/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/stable/k8s-inventory/Chart.yaml b/stable/k8s-inventory/Chart.yaml new file mode 100644 index 00000000..ea2a0696 --- /dev/null +++ b/stable/k8s-inventory/Chart.yaml @@ -0,0 +1,20 @@ +apiVersion: v2 +name: k8s-inventory +version: 0.2.2 +appVersion: "1.2.0" +description: A Helm chart for Kubernetes Automated Inventory, which describes which images are in use in a given Kubernetes Cluster +keywords: + - analysis + - docker + - anchore + - image + - inventory + - security + - scanner +home: https://anchore.com +maintainers: + - name: btodhunter + email: bradyt@anchore.com + - name: hn23 + email: hung.nguyen@anchore.com +icon: https://anchoreprd.wpengine.com/wp-content/uploads/2021/12/favicon.png diff --git a/stable/k8s-inventory/README.md b/stable/k8s-inventory/README.md new file mode 100644 index 00000000..6aa26164 --- /dev/null +++ b/stable/k8s-inventory/README.md @@ -0,0 +1,113 @@ +# K8s Inventory Helm Chart +K8s Inventory is the foundation of Anchore Enterprise's Runtime Inventory feature. Running K8s Inventory via Helm is a great way to retrieve your Kubernetes Image inventory without providing Cluster Credentials to Anchore. The minimum version of the Anchore Enterprise platform required for K8s Inventory is 4.7. 
+
+K8s Inventory runs as a read-only service account in the cluster it's deployed to.
+
+In order to report the inventory to Anchore, K8s Inventory does require authentication material for your Anchore Enterprise deployment.
+K8s Inventory's helm chart automatically creates a kubernetes secret for the Anchore Password based on the values file you use, Ex.:
+```
+k8sInventory:
+  anchore:
+    password: foobar
+```
+It will set the following environment variable based on this: `ANCHORE_K8S_INVENTORY_ANCHORE_PASSWORD=foobar`.
+
+If you don't want to store your Anchore password in the values file, you can create your own secret to do this:
+```
+apiVersion: v1
+kind: Secret
+metadata:
+  name: k8s-inventory-anchore-password
+type: Opaque
+stringData:
+  ANCHORE_K8S_INVENTORY_ANCHORE_PASSWORD: foobar
+```
+and then provide it to the helm chart via the values file:
+```
+useExistingSecret: true
+existingSecretName: k8s-inventory-anchore-password
+```
+You can install the chart via:
+```
+helm repo add anchore https://charts.anchore.io
+helm install <release-name> -f <values.yaml> anchore/k8s-inventory
+```
+A basic values file can always be found [here](https://github.com/anchore/anchore-charts/tree/master/stable/k8s-inventory/values.yaml)
+
+The key configurations are in the k8sInventory.anchore section. K8s Inventory must be able to resolve the Anchore URL and requires API credentials.
+
+Note: the Anchore API Password can be provided via a kubernetes secret, or injected into the environment of the K8s Inventory container
+* For injecting the environment variable, see: injectSecretsViaEnv
+* For providing your own secret for the Anchore API Password, see: useExistingSecret. K8s Inventory creates its own secret based on your values.yaml file for key k8sInventory.anchore.password, but the k8sInventory.useExistingSecret key allows you to create your own secret and provide it in the values file.
+ +See the [K8s Inventory repo](https://github.com/anchore/k8s-inventory) for more information about the K8s Inventory specific configuration + +## Parameters + +### Common Resource Parameters + +| Name | Description | Value | +| ------------------------------------- | ----------------------------------------------------------------------------------------------------------------------- | ----------------------- | +| `replicaCount` | Number of replicas for the K8s Inventory deployment | `1` | +| `image.pullPolicy` | Image pull policy used by the K8s Inventory deployment | `IfNotPresent` | +| `image.repository` | Image used for the K8s Inventory deployment | `anchore/k8s-inventory` | +| `image.tag` | Image tag used for the K8s Inventory deployment | `v1.1.1` | +| `imagePullSecrets` | secrets where Kubernetes should get the credentials for pulling private images | `[]` | +| `nameOverride` | overrides the name set on resources | `""` | +| `fullnameOverride` | overrides the fullname set on resources | `""` | +| `injectSecretsViaEnv` | Enable secret injection into pod via environment variables instead of via k8s secrets | `false` | +| `serviceAccount.create` | Create a service account for k8s-inventory to use | `true` | +| `serviceAccount.annotations` | Annotations to add to the service account | `{}` | +| `serviceAccount.name` | The name of the service account to use. If not set and create is true, a name is generated using the fullname template. 
| `k8s-inventory` | +| `podAnnotations` | Annotations set on all pods | `{}` | +| `annotations` | Common annotations set on all Kubernetes resources | `{}` | +| `podSecurityContext` | Security context set on all pods | `{}` | +| `securityContext` | Security context set on all containers | `{}` | +| `service.type` | Service type for K8s Inventory | `ClusterIP` | +| `service.port` | Service port for K8s Inventory | `80` | +| `resources` | Resource requests and limits for K8s Inventory pods | `{}` | +| `nodeSelector` | Node labels for K8s Inventory pods assignment | `{}` | +| `tolerations` | Tolerations for K8s Inventory pods assignment | `[]` | +| `affinity` | Affinity for K8s Inventory pods assignment | `{}` | +| `labels` | Adds additionnal labels to all kubernetes resources | `{}` | +| `probes.liveness.initialDelaySeconds` | Initial delay seconds for liveness probe | `1` | +| `probes.liveness.timeoutSeconds` | Timeout seconds for liveness probe | `10` | +| `probes.liveness.periodSeconds` | Period seconds for liveness probe | `5` | +| `probes.liveness.failureThreshold` | Failure threshold for liveness probe | `6` | +| `probes.liveness.successThreshold` | Success threshold for liveness probe | `1` | +| `probes.readiness.timeoutSeconds` | Timeout seconds for the readiness probe | `10` | +| `probes.readiness.periodSeconds` | Period seconds for the readiness probe | `15` | +| `probes.readiness.failureThreshold` | Failure threshold for the readiness probe | `3` | +| `probes.readiness.successThreshold` | Success threshold for the readiness probe | `1` | +| `useExistingSecret` | Specify whether to use an existing secret | `false` | +| `existingSecretName` | if using an existing secret, specify the existing secret name | `""` | + + +### k8sInventory Parameters ## + +| Name | Description | Value | +| ----------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------- | ----------------------- | +| `k8sInventory.output` | The output format of the report (options: table, json) | `json` | +| `k8sInventory.quiet` | Determine whether or not to log the inventory report to stdout | `false` | +| `k8sInventory.verboseInventoryReports` | Determine whether or not to log the inventory report to stdout | `false` | +| `k8sInventory.log.structured` | Determine whether or not to use structured logs | `false` | +| `k8sInventory.log.level` | the level of verbosity for logs | `debug` | +| `k8sInventory.log.file` | location to write the log file (default is not to have a log file) | `""` | +| `k8sInventory.kubeconfig.path` | Path should not be changed | `use-in-cluster` | +| `k8sInventory.kubeconfig.cluster` | Tells Anchore which cluster this inventory is coming from | `docker-desktop` | +| `k8sInventory.namespaceSelectors.include` | Which namespaces to search as explicit strings, not regex; Will search all namespaces if empty array | `[]` | +| `k8sInventory.namespaceSelectors.exclude` | Which namespaces to exclude can use explicit strings and/or regexes. | `[]` | +| `k8sInventory.mode` | Can be one of adhoc, periodic (defaults to adhoc) | `periodic` | +| `k8sInventory.pollingIntervalSeconds` | Only respected if mode is periodic | `60` | +| `k8sInventory.kubernetes.requestTimeoutSeconds` | Sets the request timeout for kubernetes API requests | `60` | +| `k8sInventory.kubernetes.requestBatchSize` | Sets the number of objects to iteratively return when listing resources | `100` | +| `k8sInventory.kubernetes.workerPoolSize` | Worker pool size for collecting pods from namespaces. Adjust this if the api-server gets overwhelmed | `100` | +| `k8sInventory.missingTagPolicy.policy` | One of the following options [digest, insert, drop]. Default is 'digest' | `digest` | +| `k8sInventory.missingTagPolicy.tag` | Dummy tag to use. 
Only applicable if policy is 'insert'. Defaults to UNKNOWN | `UNKNOWN` | +| `k8sInventory.ignoreNotRunning` | Ignore images out of pods that are not in a Running state | `true` | +| `k8sInventory.anchore.url` | the url of the anchore platform | `http://localhost:8228` | +| `k8sInventory.anchore.user` | the username of the anchore platform. The user specified must be an admin user or have full-control, or read-write RBAC permissions | `admin` | +| `k8sInventory.anchore.password` | the password of the anchore platform | `foobar` | +| `k8sInventory.anchore.account` | the account to send data to | `admin` | +| `k8sInventory.anchore.http.insecure` | whether or not anchore is using ssl/tls | `true` | +| `k8sInventory.anchore.http.timeoutSeconds` | the amount of time in seconds before timing out | `10` | diff --git a/stable/k8s-inventory/ci/fake-values.yaml b/stable/k8s-inventory/ci/fake-values.yaml new file mode 100644 index 00000000..607ad9cc --- /dev/null +++ b/stable/k8s-inventory/ci/fake-values.yaml @@ -0,0 +1,5 @@ +k8sInventory: + anchore: + url: http://engine-anchore-engine-api:8228 + user: user1 + password: password1 diff --git a/stable/k8s-inventory/templates/NOTES.txt b/stable/k8s-inventory/templates/NOTES.txt new file mode 100644 index 00000000..df5bd75b --- /dev/null +++ b/stable/k8s-inventory/templates/NOTES.txt @@ -0,0 +1,6 @@ +K8s Inventory is a tool to gather an inventory of images in use in your cluster and ship them to the Anchore platform. +K8s Inventory must be able to resolve the Anchore URL and requires API credentials. + +For more info see: https://github.com/anchore/k8s-inventory + +K8s Inventory is now installed. diff --git a/stable/k8s-inventory/templates/_helpers.tpl b/stable/k8s-inventory/templates/_helpers.tpl new file mode 100644 index 00000000..c675921f --- /dev/null +++ b/stable/k8s-inventory/templates/_helpers.tpl @@ -0,0 +1,78 @@ +{{/* +Expand the name of the chart. 
+*/}} +{{- define "k8sInventory.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "k8sInventory.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "k8sInventory.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "k8sInventory.labels" -}} +helm.sh/chart: {{ include "k8sInventory.chart" . }} +{{ include "k8sInventory.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +chart: {{ .Chart.Name }}-{{ .Chart.Version }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- with .Values.labels }} +{{ toYaml . }} +{{- end }} +app: {{ include "k8sInventory.fullname" . }} +release: {{ .Release.Name }} +heritage: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "k8sInventory.selectorLabels" -}} +app.kubernetes.io/name: {{ include "k8sInventory.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "k8sInventory.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "k8sInventory.fullname" .) 
.Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} + +{{/* +Require Anchore endpoint and Anchore credentials +*/}} +{{- define "checkAnchoreRequisites" }} +{{- if or (not .Values.k8sInventory.anchore.url) (not .Values.k8sInventory.anchore.user) (and (not .Values.useExistingSecret) (not .Values.k8sInventory.anchore.password)) }} + {{- fail "Anchore endpoint and credentials are required. See the chart README for more instructions on configuring Anchore Requisites." }} +{{- end }} +{{- end }} diff --git a/stable/k8s-inventory/templates/cluster-role-binding.yaml b/stable/k8s-inventory/templates/cluster-role-binding.yaml new file mode 100644 index 00000000..b9bb06ff --- /dev/null +++ b/stable/k8s-inventory/templates/cluster-role-binding.yaml @@ -0,0 +1,20 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "k8sInventory.fullname" . }}-read-only-binding + {{- if .Values.annotations }} + annotations: + {{- with .Values.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "k8sInventory.labels" . | nindent 4 }} +subjects: + - kind: ServiceAccount + name: {{ include "k8sInventory.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ include "k8sInventory.fullname" . }}-read-only + apiGroup: rbac.authorization.k8s.io diff --git a/stable/k8s-inventory/templates/cluster-role-readonly.yaml b/stable/k8s-inventory/templates/cluster-role-readonly.yaml new file mode 100644 index 00000000..45a8ab3f --- /dev/null +++ b/stable/k8s-inventory/templates/cluster-role-readonly.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "k8sInventory.fullname" . }}-read-only + {{- if .Values.annotations }} + annotations: + {{- with .Values.annotations }} + {{- toYaml . 
| nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "k8sInventory.labels" . | nindent 4 }} +rules: +- apiGroups: [""] + resources: ["pods","namespaces", "nodes"] + verbs: ["get", "watch", "list"] diff --git a/stable/k8s-inventory/templates/configmap.yaml b/stable/k8s-inventory/templates/configmap.yaml new file mode 100644 index 00000000..3198ca6d --- /dev/null +++ b/stable/k8s-inventory/templates/configmap.yaml @@ -0,0 +1,46 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: {{ include "k8sInventory.fullname" . }} + {{- if .Values.annotations }} + annotations: + {{- with .Values.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "k8sInventory.labels" . | nindent 4 }} +data: + config.yaml: | + verbose-inventory-reports: {{ .Values.k8sInventory.verboseInventoryReports | quote }} + kubeconfig: + path: {{ .Values.k8sInventory.kubeconfig.path }} + cluster: {{ .Values.k8sInventory.kubeconfig.cluster }} + output: {{ .Values.k8sInventory.output }} + quiet: {{ .Values.k8sInventory.quiet }} + log: + structured: {{ .Values.k8sInventory.log.structured }} + level: {{ .Values.k8sInventory.log.level }} + file: {{ .Values.k8sInventory.log.file }} + namespaces: + {{- toYaml .Values.k8sInventory.namespaces | nindent 6 }} + namespace-selectors: + {{- toYaml .Values.k8sInventory.namespaceSelectors | nindent 6 }} + mode: {{ .Values.k8sInventory.mode }} + polling-interval-seconds: {{ .Values.k8sInventory.pollingIntervalSeconds }} + kubernetes-request-timeout-seconds: {{ .Values.k8sInventory.kubernetesRequestTimeoutSeconds }} + kubernetes: + request-timeout-seconds: {{ .Values.k8sInventory.kubernetes.requestTimeoutSeconds }} + request-batch-size: {{ .Values.k8sInventory.kubernetes.requestBatchSize }} + worker-pool-size: {{ .Values.k8sInventory.kubernetes.workerPoolSize }} + ignore-not-running: {{ .Values.k8sInventory.ignoreNotRunning }} + missing-tag-policy: + {{- toYaml .Values.k8sInventory.missingTagPolicy | nindent 6 }} + 
anchore: + url: {{ .Values.k8sInventory.anchore.url }} + user: {{ .Values.k8sInventory.anchore.user }} + password: $ANCHORE_K8S_INVENTORY_ANCHORE_PASSWORD + account: {{ .Values.k8sInventory.anchore.account }} + http: + insecure: {{ .Values.k8sInventory.anchore.http.insecure }} + timeout-seconds: {{ .Values.k8sInventory.anchore.http.timeoutSeconds }} diff --git a/stable/k8s-inventory/templates/deployment.yaml b/stable/k8s-inventory/templates/deployment.yaml new file mode 100644 index 00000000..aaaa895a --- /dev/null +++ b/stable/k8s-inventory/templates/deployment.yaml @@ -0,0 +1,98 @@ +{{- template "checkAnchoreRequisites" . }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "k8sInventory.fullname" . }} + {{- if .Values.annotations }} + annotations: + {{- with .Values.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "k8sInventory.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + {{- include "k8sInventory.selectorLabels" . | nindent 6 }} + template: + metadata: + annotations: + {{- with .Values.annotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if not .Values.useExistingSecret }} + checksum/secrets: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }} + {{- end }} + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + labels: + {{- include "k8sInventory.labels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "k8sInventory.serviceAccountName" . 
}} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: ["/anchore-k8s-inventory"] + ports: + - name: http + containerPort: 80 + protocol: TCP + livenessProbe: + exec: + command: + - /anchore-k8s-inventory + - version + initialDelaySeconds: {{ .Values.probes.liveness.initialDelaySeconds }} + timeoutSeconds: {{ .Values.probes.liveness.timeoutSeconds }} + periodSeconds: {{ .Values.probes.liveness.periodSeconds }} + failureThreshold: {{ .Values.probes.liveness.failureThreshold }} + successThreshold: {{ .Values.probes.liveness.successThreshold }} + readinessProbe: + exec: + command: + - /anchore-k8s-inventory + - version + timeoutSeconds: {{ .Values.probes.readiness.timeoutSeconds }} + periodSeconds: {{ .Values.probes.readiness.periodSeconds }} + failureThreshold: {{ .Values.probes.readiness.failureThreshold }} + successThreshold: {{ .Values.probes.readiness.successThreshold }} + resources: + {{- toYaml .Values.resources | nindent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/xdg/anchore-k8s-inventory/config.yaml + subPath: config.yaml + envFrom: + {{- if not .Values.injectSecretsViaEnv }} + - secretRef: + name: {{ default (include "k8sInventory.fullname" .) .Values.existingSecretName }} + {{- end }} + volumes: + - name: config-volume + configMap: + name: {{ include "k8sInventory.fullname" . }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . 
| nindent 8 }} + {{- end }} diff --git a/stable/k8s-inventory/templates/secrets.yaml b/stable/k8s-inventory/templates/secrets.yaml new file mode 100644 index 00000000..37d81d92 --- /dev/null +++ b/stable/k8s-inventory/templates/secrets.yaml @@ -0,0 +1,17 @@ +{{- if not .Values.useExistingSecret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "k8sInventory.fullname" . }} + {{- if .Values.annotations }} + annotations: + {{- with .Values.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "k8sInventory.labels" . | nindent 4 }} +type: Opaque +stringData: + ANCHORE_K8S_INVENTORY_ANCHORE_PASSWORD: {{ .Values.k8sInventory.anchore.password }} +{{- end }} diff --git a/stable/k8s-inventory/templates/service.yaml b/stable/k8s-inventory/templates/service.yaml new file mode 100644 index 00000000..a2e4bc47 --- /dev/null +++ b/stable/k8s-inventory/templates/service.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "k8sInventory.fullname" . }} + {{- if .Values.annotations }} + annotations: + {{- with .Values.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "k8sInventory.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "k8sInventory.selectorLabels" . | nindent 4 }} diff --git a/stable/k8s-inventory/templates/serviceaccount.yaml b/stable/k8s-inventory/templates/serviceaccount.yaml new file mode 100644 index 00000000..fc424607 --- /dev/null +++ b/stable/k8s-inventory/templates/serviceaccount.yaml @@ -0,0 +1,17 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "k8sInventory.serviceAccountName" . }} + labels: + {{- include "k8sInventory.labels" . 
| nindent 4 }} + {{- if or (.Values.annotations) (.Values.serviceAccount.annotations)}} + annotations: + {{- with .Values.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.serviceAccount.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- end -}} +{{- end }} diff --git a/stable/k8s-inventory/values.yaml b/stable/k8s-inventory/values.yaml new file mode 100644 index 00000000..18361f7f --- /dev/null +++ b/stable/k8s-inventory/values.yaml @@ -0,0 +1,213 @@ +################################################### +## @section Common Resource Parameters +## Common params used by all K8s Inventory resources +################################################### + +## @param replicaCount Number of replicas for the K8s Inventory deployment +## +replicaCount: 1 + +## @param image.pullPolicy Image pull policy used by the K8s Inventory deployment +## @param image.repository Image used for the K8s Inventory deployment +## @param image.tag Image tag used for the K8s Inventory deployment +## use tag v1.0.2-fips-amd64 if you want an image built for fips use +## +image: + pullPolicy: "IfNotPresent" + repository: "anchore/k8s-inventory" + tag: "v1.2.0" + +## @param imagePullSecrets secrets where Kubernetes should get the credentials for pulling private images +## +imagePullSecrets: [] + +## @param nameOverride overrides the name set on resources +## +nameOverride: "" + +## @param fullnameOverride overrides the fullname set on resources +## +fullnameOverride: "" + +## @param injectSecretsViaEnv Enable secret injection into pod via environment variables instead of via k8s secrets +## Useful for injecting secrets directly into k8s pods from Hashicorp vault +## +injectSecretsViaEnv: false + +## @param serviceAccount.create Create a service account for k8s-inventory to use +## @param serviceAccount.annotations Annotations to add to the service account +## @param serviceAccount.name The name of the service account to use. 
If not set and create is true, a name is generated using the fullname template. +## +serviceAccount: + create: true + annotations: {} + name: "k8s-inventory" + +## @param podAnnotations Annotations set on all pods +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} + +## @param annotations Common annotations set on all Kubernetes resources +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +annotations: {} + +## @param podSecurityContext Security context set on all pods +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod +## +podSecurityContext: {} + # fsGroup: 2000 + +## @param securityContext Security context set on all containers +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container +## +securityContext: {} + +## @param service.type Service type for K8s Inventory +## @param service.port Service port for K8s Inventory +service: + type: ClusterIP + port: 80 + +## @param resources Resource requests and limits for K8s Inventory pods +## +resources: {} + +## @param nodeSelector Node labels for K8s Inventory pods assignment +## +nodeSelector: {} + +## @param tolerations Tolerations for K8s Inventory pods assignment +## +tolerations: [] + +## @param affinity Affinity for K8s Inventory pods assignment +## +affinity: {} + +## @param labels Adds additionnal labels to all kubernetes resources +## +labels: {} + +## @param probes.liveness.initialDelaySeconds Initial delay seconds for liveness probe +## @param probes.liveness.timeoutSeconds Timeout seconds for liveness probe +## @param probes.liveness.periodSeconds Period seconds for liveness probe +## @param probes.liveness.failureThreshold Failure threshold for liveness probe +## @param probes.liveness.successThreshold Success threshold for liveness probe +## @param 
probes.readiness.timeoutSeconds Timeout seconds for the readiness probe +## @param probes.readiness.periodSeconds Period seconds for the readiness probe +## @param probes.readiness.failureThreshold Failure threshold for the readiness probe +## @param probes.readiness.successThreshold Success threshold for the readiness probe +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes +## +probes: + liveness: + initialDelaySeconds: 1 + timeoutSeconds: 10 + periodSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + readiness: + timeoutSeconds: 10 + periodSeconds: 15 + failureThreshold: 3 + successThreshold: 1 + +## @param useExistingSecret Specify whether to use an existing secret +## +useExistingSecret: false + +## @param existingSecretName if using an existing secret, specify the existing secret name +## +existingSecretName: "" + +###################################### +## @section k8sInventory Parameters ## +###################################### + +k8sInventory: + + ## @param k8sInventory.output The output format of the report (options: table, json) + ## + output: "json" + + ## @param k8sInventory.quiet Determine whether or not to log the inventory report to stdout + ## + quiet: false + + ## @param k8sInventory.verboseInventoryReports Determine whether or not to log the inventory report to stdout + ## + verboseInventoryReports: false + + ## @param k8sInventory.log.structured Determine whether or not to use structured logs + ## @param k8sInventory.log.level the level of verbosity for logs + ## @param k8sInventory.log.file location to write the log file (default is not to have a log file) + ## + log: + structured: false + level: "debug" + file: "" + + ## @param k8sInventory.kubeconfig.path Path should not be changed + ## @param k8sInventory.kubeconfig.cluster Tells Anchore which cluster this inventory is coming from + ## + kubeconfig: + path: use-in-cluster + cluster: docker-desktop + + ## @param 
k8sInventory.namespaceSelectors.include Which namespaces to search as explicit strings, not regex; Will search all namespaces if empty array + ## @param k8sInventory.namespaceSelectors.exclude Which namespaces to exclude can use explicit strings and/or regexes. + ## + namespaceSelectors: + include: [] + exclude: [] + + ## @param k8sInventory.mode Can be one of adhoc, periodic (defaults to adhoc) + ## + mode: periodic + + ## @param k8sInventory.pollingIntervalSeconds Only respected if mode is periodic + ## + pollingIntervalSeconds: 60 + + ### k8sInventory.kubernetes Kubernetes API configuration parameters (should not need tuning) + ## @param k8sInventory.kubernetes.requestTimeoutSeconds Sets the request timeout for kubernetes API requests + ## @param k8sInventory.kubernetes.requestBatchSize Sets the number of objects to iteratively return when listing resources + ## @param k8sInventory.kubernetes.workerPoolSize Worker pool size for collecting pods from namespaces. Adjust this if the api-server gets overwhelmed + # + kubernetes: + requestTimeoutSeconds: 60 + requestBatchSize: 100 + workerPoolSize: 100 + + ### k8sInventory.missingTagPolicy Handle cases where a tag is missing. For example - images designated by digest + ## @param k8sInventory.missingTagPolicy.policy One of the following options [digest, insert, drop]. Default is 'digest' + ### [digest] will use the image's digest as a dummy tag. + ### [insert] will insert a default tag in as a dummy tag. The dummy tag is + ### [drop] will drop images that do not have tags associated with them. Not recommended. + ## @param k8sInventory.missingTagPolicy.tag Dummy tag to use. Only applicable if policy is 'insert'. 
Defaults to UNKNOWN + ## + missingTagPolicy: + policy: digest + tag: UNKNOWN + + ## @param k8sInventory.ignoreNotRunning Ignore images out of pods that are not in a Running state + ## + ignoreNotRunning: true + + ## @param k8sInventory.anchore.url the url of the anchore platform + ## @param k8sInventory.anchore.user the username of the anchore platform. The user specified must be an admin user or have full-control, or read-write RBAC permissions + ## @param k8sInventory.anchore.password the password of the anchore platform + ## @param k8sInventory.anchore.account the account to send data to + ## @param k8sInventory.anchore.http.insecure whether or not anchore is using ssl/tls + ## @param k8sInventory.anchore.http.timeoutSeconds the amount of time in seconds before timing out + ## + anchore: + url: "" + user: "" + password: "" + account: "admin" + http: + insecure: true + timeoutSeconds: 10 diff --git a/stable/kai/Chart.yaml b/stable/kai/Chart.yaml index 1af3746f..1151bbc8 100644 --- a/stable/kai/Chart.yaml +++ b/stable/kai/Chart.yaml @@ -1,5 +1,7 @@ apiVersion: v2 name: kai +version: 0.5.1 +appVersion: 0.5.0 description: A Helm chart for Kubernetes Automated Inventory, which describes which images are in use in a given Kubernetes Cluster keywords: - analysis @@ -10,29 +12,10 @@ keywords: - inventory - security - scanner - +home: https://anchore.com maintainers: - - name: dakaneye - email: sam.dacanay@anchore.com - -# A chart can be either an 'application' or a 'library' chart. -# -# Application charts are a collection of templates that can be packaged into versioned archives -# to be deployed. -# -# Library charts provide useful utilities or functions for the chart developer. They're included as -# a dependency of application charts to inject those utilities and functions into the rendering -# pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application - -# This is the chart version. 
This version number should be incremented each time you make changes -# to the chart and its templates, including the app version. -# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.3.0 - -# This is the version number of the application being deployed. This version number should be -# incremented each time you make changes to the application. Versions are not expected to -# follow Semantic Versioning. They should reflect the version the application is using. -appVersion: 0.3.0 - -icon: https://anchore.com/wp-content/uploads/2016/08/anchore.png + - name: zhill + email: zach@anchore.com + - name: btodhunter + email: bradyt@anchore.com +icon: https://anchoreprd.wpengine.com/wp-content/uploads/2021/12/favicon.png diff --git a/stable/kai/README.md b/stable/kai/README.md index ee61cf1f..c7d3633e 100644 --- a/stable/kai/README.md +++ b/stable/kai/README.md @@ -1,7 +1,7 @@ # KAI Helm Chart KAI is the foundation of Anchore Enterprise's Runtime Inventory feature. Running KAI via Helm is a great way to retrieve your Kubernetes Image inventory without providing Cluster Credentials to Anchore. -KAI runs as a read-only service account in the cluster it's deployed to. +KAI runs as a read-only service account in the cluster it's deployed to. In order to report the inventory to Anchore, KAI does require authentication material for your Anchore Enterprise deployment. KAI's helm chart automatically creates a kubernetes secret for the Anchore Password based on the values file you use, Ex.: @@ -31,7 +31,7 @@ You can install the chart via via: ``` helm repo add anchore https://charts.anchore.io helm install -f anchore/kai -``` +``` A basic values file can always be found [here](https://github.com/anchore/anchore-charts/tree/master/stable/kai/values.yaml) The key configurations are in the kai.anchore section. Kai must be able to resolve the Anchore URL and requires API credentials. 
@@ -40,4 +40,4 @@ Note: the Anchore API Password can be provided via a kubernetes secret, or injec * For injecting the environment variable, see: inject_secrets_via_env * For providing your own secret for the Anchore API Password, see: kai.existing_secret. kai creates it's own secret based on your values.yaml file for key kai.anchore.password, but the kai.existingSecret key allows you to create your own secret and provide it in the values file. -See the [kai repo](https://github.com/anchore/kai) for more information about the KAI-specific configuration \ No newline at end of file +See the [kai repo](https://github.com/anchore/kai) for more information about the KAI-specific configuration diff --git a/stable/kai/templates/cluster-role-binding.yaml b/stable/kai/templates/cluster-role-binding.yaml index 96584425..971da80b 100644 --- a/stable/kai/templates/cluster-role-binding.yaml +++ b/stable/kai/templates/cluster-role-binding.yaml @@ -1,12 +1,12 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: kai-read-only-binding + name: {{ include "kai.fullname" . }}-read-only-binding subjects: - kind: ServiceAccount name: {{ include "kai.serviceAccountName" . }} namespace: {{ .Release.Namespace }} roleRef: kind: ClusterRole - name: kai-read-only - apiGroup: rbac.authorization.k8s.io \ No newline at end of file + name: {{ include "kai.fullname" . }}-read-only + apiGroup: rbac.authorization.k8s.io diff --git a/stable/kai/templates/cluster-role-readonly.yaml b/stable/kai/templates/cluster-role-readonly.yaml index ddcf9c5c..abb821a4 100644 --- a/stable/kai/templates/cluster-role-readonly.yaml +++ b/stable/kai/templates/cluster-role-readonly.yaml @@ -1,8 +1,8 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: kai-read-only + name: {{ include "kai.fullname" . 
}}-read-only rules: - apiGroups: [""] resources: ["pods","namespaces"] - verbs: ["get", "watch", "list"] \ No newline at end of file + verbs: ["get", "watch", "list"] diff --git a/stable/kai/templates/configmap.yaml b/stable/kai/templates/configmap.yaml index 5bd774ff..717f3813 100644 --- a/stable/kai/templates/configmap.yaml +++ b/stable/kai/templates/configmap.yaml @@ -8,7 +8,7 @@ metadata: release: {{ .Release.Name }} heritage: {{ .Release.Service }} data: - .kai.yaml: | + config.yaml: | kubeconfig: path: {{ .Values.kai.kubeconfig.path }} cluster: {{ .Values.kai.kubeconfig.cluster }} @@ -40,3 +40,4 @@ data: http: insecure: {{ .Values.kai.anchore.http.insecure }} timeout-seconds: {{ .Values.kai.anchore.http.timeoutSeconds }} + verbose-inventory-reports: {{ .Values.kai.verboseInventoryReports }} diff --git a/stable/kai/templates/deployment.yaml b/stable/kai/templates/deployment.yaml index 04aee624..c8b49df6 100644 --- a/stable/kai/templates/deployment.yaml +++ b/stable/kai/templates/deployment.yaml @@ -60,8 +60,8 @@ spec: {{- toYaml .Values.resources | nindent 12 }} volumeMounts: - name: config-volume - mountPath: /home/nonroot/.kai.yaml - subPath: .kai.yaml + mountPath: /etc/xdg/kai/config.yaml + subPath: config.yaml envFrom: {{- if not .Values.inject_secrets_via_env }} - secretRef: diff --git a/stable/kai/values.yaml b/stable/kai/values.yaml index de53de73..ba3a1e9f 100644 --- a/stable/kai/values.yaml +++ b/stable/kai/values.yaml @@ -8,7 +8,7 @@ image: repository: anchore/kai pullPolicy: IfNotPresent # Overrides the image tag whose default is the chart appVersion. - tag: "v0.3.0" + tag: "v0.5.0" imagePullSecrets: [] nameOverride: "" @@ -164,3 +164,5 @@ kai: http: insecure: true timeoutSeconds: 10 + + verboseInventoryReports: false